galera-4-26.4.25/tests/000755 000164 177776 00000000000 15107057160 015652 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/tests/run_sqlgen.sh000755 000164 177776 00000001541 15107057155 020373 0ustar00jenkinsnogroup000000 000000 #!/bin/bash -eu declare -r DIST_BASE=$(cd $(dirname $0); pwd -P) TEST_BASE=${TEST_BASE:-"$DIST_BASE"} . $TEST_BASE/conf/main.conf declare -r SCRIPTS="$DIST_BASE/scripts" . $SCRIPTS/jobs.sh . $SCRIPTS/action.sh . $SCRIPTS/kill.sh . $SCRIPTS/misc.sh check() { consistency_check $sqlgen_pid } #trap check SIGINT #node=1 #DBMS_HOST=${NODE_INCOMING_HOST[$node]} #DBMS_PORT=${NODE_INCOMING_PORT[$node]} # Start load SQLGEN=${SQLGEN:-"$DIST_BASE/bin/sqlgen"} LD_PRELOAD=$GLB_PRELOAD \ DYLD_INSERT_LIBRARIES=$GLB_PRELOAD \ DYLD_FORCE_FLAT_NAMESPACE=1 \ $SQLGEN --user $DBMS_TEST_USER --pswd $DBMS_TEST_PSWD --host $DBMS_HOST \ --port $DBMS_PORT --users $DBMS_CLIENTS --duration 999999999 \ --stat-interval 20 --sess-min 999999 --sess-max 999999 \ --rollbacks 0.1 # >/dev/null 2>$BASE_RUN/seesaw.err & #declare -r sqlgen_pid=$! 
#fg galera-4-26.4.25/tests/conf/000755 000164 177776 00000000000 15107057160 016577 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/tests/conf/galera-server-2.key000644 000164 177776 00000003250 15107057155 022213 0ustar00jenkinsnogroup000000 000000 -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDXvzFfKVjUIXI2 giBYDmabIUQe89dhCXiqu462Qx73j85bqw/gYbVishb0upeYWA/70clOLzAnr2pi gjrLltJFNZfIr3mnYwzLcVYTI6nGNiQ41uXoaQqKvelBrDM7PH7Uws4jkKJV5HAB jSUWinXXP9KeQrJJyCA6cTbOHV5xgLmWbizVxen6VXBZPbnN+rmg9jih9L8uHwhN Ma2r4uI2M9L5KzpulLQc8Wi5/hB+dRsLQnL8ZK0GnW7LIPxfrYZCJs3wOehQ33m3 HTahyDWuSqD15cPALCe24gdYTxm9osYvyikTRTJKPC0CmRWxzZmJtcUvEB/gpwlj lXQT4Ip1AgMBAAECggEAK0r7fkwOviqj/5onIWRqZJDNWaS2wIslAqW2Yo6fhS// SdfOzMjunAp+nsdeqjxpoK+dRKolcKE7qN7XK4ltUmQJvESyFcvDgoOQsTCD3fl5 VUqSQgvqCKNJTltewHFIRit9MBfS64d2jmkjWx2XvgTZn9ZkbnfRN8kGJWAxVncO 10ltBpt7Kbg4sFdYP6hNJoY2UKJFwAxto/jyX6uDtb1kSgyGRPDpsERGhKzYjj5z izrxeNa1JVPIBN3f2Sb0d+ZaIew46OSMpMkQ8xnKNXtEFH01T4m2+LFclAIM25MO kRpRl7UUF4RnSStyB2q89BTOqpr8jcoZ9oksdyp6dQKBgQD297oJTQT/tTDuixIP O5SyiLxzUXH6WfXB3cpxP5y8quzuzzsUTx7lspBzhh93/1J2btedTMW86eFsoqNI uf3nk8ZyhX6vJWfpqb1sIYh3yJBlN3GZfMweUzupmO4jU0MsfQayZ09cKuR3zUMt ujcYVmYZdgfLD6eKPDU0Q/osEwKBgQDfoyf+2/Yy1g+co2cVyqokyLL9a0jtY9t0 j/B3S9N2wIjSnb8CkYxMBwYh+dBwc/Eu+6LxLTGdtg029RU2daWzEKosG9Py2wsR LztHD9sxXZdRzc6W6LN6deIaSj0H01s3vcOgDm0++ap2ZjNyc85nhy0fieRkw3nJ xlqQqwgwVwKBgBPbBNz8kTtRwPZcvf31h1X3QldvI9/B3c1RXJZQS/SrNVNZ2+Ed H1nOSmjPTrZiE6or7S/bXUZ780C9rq0JLw2wRtMsQmwocLtLh0wrQgKHYHySwZJa gzqo7HINNpAmgI8SGji7r5i0Zhvvp8gEYauWPq0rXSMJRioJ/ykNkg57AoGAGWpZ aUVmPXDGZW7MkFVv8K4+aT6AEzp9/kk9ctFTPvOymP0EcC5KW3mQ4NubLKyAhG5k njQcp2fBKLXBq2bDZg5GyKyA8eCi6VkMy46pwnp4b/uLturLOueawpIdTX5fp34R dWcuUzHchYgn4KH4mxtprWPmaO0uMhgwwrAtRhECgYB2leEWwtWveznBv5e5ZnGJ Ku2Zyj+FCv332YmjQO4r77o4C5UI83FoRIAYAGBKUwZgRhrc/7bvzjCEWG21Iq0g 94Odtcrvz+NxqVYsm+IYkGPiqdOi9heSPFUZkq8qdieft3HZlYxCBUC9hxuiWCsT HYiY2+4qEjcnfLn8B/K/rQ== -----END PRIVATE KEY----- 
galera-4-26.4.25/tests/conf/common_my.cnf.tmpl000644 000164 177776 00000001236 15107057155 022245 0ustar00jenkinsnogroup000000 000000 [mysqld] core-file bind-address=127.0.1.1 binlog_cache_size=4096 innodb_flush_log_at_trx_commit=0 innodb_buffer_pool_size=64M innodb_log_file_size=64M innodb_locks_unsafe_for_binlog=1 innodb_lock_wait_timeout=50 innodb_autoinc_lock_mode=2 # query_cache_size=1M query_cache_type=1 max_connections=1024 max_connect_errors=4294967295 performance_schema=OFF binlog_format=ROW default-storage-engine=innodb wsrep_slave_threads=4 # Some regression tests use causal reads to determine if # all pending queries have been processed. Set to sufficiently high # time to avoid timing out too early. wsrep_provider_options="repl.causal_read_timeout=PT10H;evs.info_log_mask=0x3" galera-4-26.4.25/tests/conf/my.cnf.1.tmpl000644 000164 177776 00000000136 15107057155 021032 0ustar00jenkinsnogroup000000 000000 wsrep_sst_method=rsync wsrep_sst_auth=root:rootpass wsrep_sst_receive_address=127.0.1.1:10013 galera-4-26.4.25/tests/conf/galera-server-3.pem000644 000164 177776 00000012411 15107057155 022204 0ustar00jenkinsnogroup000000 000000 Certificate: Data: Version: 3 (0x2) Serial Number: 2f:5c:d5:66:b6:b8:4f:56:0f:d9:f8:25:9b:b1:ae:5e Signature Algorithm: sha256WithRSAEncryption Issuer: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=Galera Int/emailAddress=devel@galeracluster.com Validity Not Before: Jan 21 10:30:06 2021 GMT Not After : Jan 6 10:30:06 2024 GMT Subject: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=galera-server-3/emailAddress=devel@galeracluster.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:b4:83:c2:ec:a8:91:e8:31:ab:07:05:97:95:c9: d6:82:a1:8e:47:96:43:83:fc:f8:c9:e4:73:e7:b8: 9d:61:22:20:44:65:b5:8f:59:c8:3d:b1:a5:84:f7: 7c:79:2b:23:b4:dc:aa:a0:b2:a2:2d:d9:a4:46:fd: 4c:28:61:83:b5:67:31:70:81:3f:15:a0:38:19:e8: 
50:47:82:37:4f:06:d0:f2:db:57:ef:b1:b9:44:2f: dd:70:a8:b6:44:31:69:53:70:cd:76:24:ab:3a:35: be:68:23:8c:30:52:2a:d5:45:f2:16:5c:62:bd:ba: bc:f0:d8:52:b6:97:1f:38:99:c2:b1:d2:1c:bf:cf: 76:77:10:0c:f3:68:53:e8:a2:a8:9c:89:6b:d2:62: 7e:d6:d5:0a:73:d6:bf:f3:35:d9:20:9c:ec:5e:4d: c1:08:68:2f:9f:ac:e6:22:49:f8:31:99:78:1f:73: e1:92:13:a7:79:05:c1:b8:91:be:fb:25:a0:3d:ac: 4f:a8:33:bf:53:6e:5d:0d:83:e2:03:d9:03:d2:f2: df:b3:de:b1:2e:b5:a1:0e:25:52:15:ad:e1:9b:76: 46:7c:f9:c0:87:00:04:8c:a8:32:f1:39:5f:6e:f3: a1:05:36:6d:ab:82:21:0f:cd:18:37:ec:ad:f2:48: bc:c5 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE X509v3 Subject Key Identifier: 40:AA:07:8D:B7:E6:BA:A8:F1:46:89:E9:4F:5F:9D:8D:B1:40:97:1D X509v3 Authority Key Identifier: keyid:C7:1F:75:96:5D:5A:4A:CD:71:AB:39:78:3E:1D:80:2D:B4:13:C2:FF DirName:/C=FI/ST=Uusimaa/L=Helsinki/O=Codership Oy/OU=Galera Devel/CN=Galera Root CA/emailAddress=devel@galeracluster.com serial:92:CE:97:05:D3:DA:44:B3:CE:1E:D7:DA:F9:FE:E0:90 X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Subject Alternative Name: DNS:galera-server-3 Signature Algorithm: sha256WithRSAEncryption 1f:b7:e2:94:f0:23:0f:26:39:b6:e2:f4:68:ca:47:0d:1e:5c: 49:bb:19:14:d8:8b:3f:88:d9:05:c6:69:9a:bb:d4:61:ef:19: dd:e2:c8:68:26:77:0c:fc:7d:fc:00:75:de:01:b9:98:cd:10: 23:18:b9:e0:6b:0e:73:5d:63:3f:22:95:18:df:11:04:0d:3a: 43:4e:7e:3b:75:ee:9f:cd:1c:4d:05:92:c4:8b:3c:02:22:f5: ec:1a:50:32:ff:a1:5b:cf:8d:07:4a:a4:97:a1:bf:6a:5c:b1: 24:ef:2b:d6:0d:7c:70:1d:58:35:8b:da:e3:37:be:cc:94:cb: 2a:05:aa:32:de:a6:00:9c:c4:f6:cb:ec:0a:fe:cc:2f:dc:e7: 76:7f:61:30:23:ae:fe:ff:f6:57:34:9c:3d:8f:ff:b3:0a:6f: 7b:f9:fc:e2:5b:aa:17:a2:8d:10:5f:46:ee:23:98:a9:06:b9: 2c:11:52:fe:14:ed:02:4c:85:af:06:d0:f7:bb:f8:aa:d3:9b: 0f:5c:5c:10:df:7c:a8:ee:6a:c7:b9:67:e3:9e:fe:cc:24:66: 08:3b:6f:80:53:1d:99:e5:94:a0:d9:bc:27:ab:1a:26:c7:04: 0e:d2:3e:5c:5d:ac:7d:06:1a:de:d5:28:bd:b5:96:23:59:e2: 9b:2a:cb:0a -----BEGIN CERTIFICATE----- 
MIIFBTCCA+2gAwIBAgIQL1zVZra4T1YP2fglm7GuXjANBgkqhkiG9w0BAQsFADCB nTELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDETMBEGA1UEAwwKR2FsZXJhIEludDEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2Fs ZXJhY2x1c3Rlci5jb20wHhcNMjEwMTIxMTAzMDA2WhcNMjQwMTA2MTAzMDA2WjCB ojELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDEYMBYGA1UEAwwPZ2FsZXJhLXNlcnZlci0zMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBALSDwuyokegxqwcFl5XJ1oKhjkeWQ4P8+Mnkc+e4nWEiIERltY9ZyD2xpYT3 fHkrI7TcqqCyoi3ZpEb9TChhg7VnMXCBPxWgOBnoUEeCN08G0PLbV++xuUQv3XCo tkQxaVNwzXYkqzo1vmgjjDBSKtVF8hZcYr26vPDYUraXHziZwrHSHL/PdncQDPNo U+iiqJyJa9JiftbVCnPWv/M12SCc7F5NwQhoL5+s5iJJ+DGZeB9z4ZITp3kFwbiR vvsloD2sT6gzv1NuXQ2D4gPZA9Ly37PesS61oQ4lUhWt4Zt2Rnz5wIcABIyoMvE5 X27zoQU2bauCIQ/NGDfsrfJIvMUCAwEAAaOCATgwggE0MAkGA1UdEwQCMAAwHQYD VR0OBBYEFECqB4235rqo8UaJ6U9fnY2xQJcdMIHeBgNVHSMEgdYwgdOAFMcfdZZd WkrNcas5eD4dgC20E8L/oYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UECAwH VXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBP eTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9vdCBD QTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CEQCSzpcF 09pEs84e19r5/uCQMAsGA1UdDwQEAwIFoDAaBgNVHREEEzARgg9nYWxlcmEtc2Vy dmVyLTMwDQYJKoZIhvcNAQELBQADggEBAB+34pTwIw8mObbi9GjKRw0eXEm7GRTY iz+I2QXGaZq71GHvGd3iyGgmdwz8ffwAdd4BuZjNECMYueBrDnNdYz8ilRjfEQQN OkNOfjt17p/NHE0FksSLPAIi9ewaUDL/oVvPjQdKpJehv2pcsSTvK9YNfHAdWDWL 2uM3vsyUyyoFqjLepgCcxPbL7Ar+zC/c53Z/YTAjrv7/9lc0nD2P/7MKb3v5/OJb qheijRBfRu4jmKkGuSwRUv4U7QJMha8G0Pe7+KrTmw9cXBDffKjuase5Z+Oe/swk Zgg7b4BTHZnllKDZvCerGibHBA7SPlxdrH0GGt7VKL21liNZ4psqywo= -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/main.conf000644 000164 177776 00000002403 15107057155 020375 0ustar00jenkinsnogroup000000 000000 #declare -r BASE_LOG="$TEST_BASE/log" declare -r BASE_OUT="$TEST_BASE/out" mkdir -p "$BASE_OUT" 
declare -r BASE_RUN="$TEST_BASE/run" mkdir -p "$BASE_RUN" declare -r BASE_CONF="$TEST_BASE/conf" . ${CLUSTER_CONF:-"$BASE_CONF/cluster.conf"} if [ "$(uname -s)" == "Darwin" ]; then GLB_PRELOAD=${DYLD_INSERT_LIBRARIES:-""} else GLB_PRELOAD=${LD_PRELOAD:-""} fi export GLB_PRELOAD # The code below tries to find available libglb.so and if found, export # necessary variables for client side load balancing GLB_LIB=${GLB_LIB:-""} if [ -z "$GLB_LIB" ] then if [ -r /usr/local/lib/libglb.so ] then GLB_LIB="/usr/local/lib/libglb.so" elif [ -r /usr/lib/libglb.so ] then GLB_LIB="/usr/lib/libglb.so" fi fi if [ -r "$GLB_LIB" ] then if [ -n "$GLB_PRELOAD" ] then export GLB_PRELOAD="$GLB_LIB:$GLB_PRELOAD" else export GLB_PRELOAD="$GLB_LIB" fi export GLB_BIND=$DBMS_HOST:$DBMS_PORT GLB_TARGETS="" for node in $NODE_LIST do target=${NODE_INCOMING_HOST[$node]}:${NODE_INCOMING_PORT[$node]} if [ $node -ne $NODE_MAX ] then GLB_TARGETS="$GLB_TARGETS$target," else GLB_TARGETS="$GLB_TARGETS$target" fi done export GLB_TARGETS fi galera-4-26.4.25/tests/conf/galera-ca.pem000644 000164 177776 00000003365 15107057155 021131 0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIE9jCCA96gAwIBAgIUeSFiqOH8MAdfxA+Wb5QFN6tSvDMwDQYJKoZIhvcNAQEL BQAwgaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhI ZWxzaW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEg RGV2ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYwJAYJKoZIhvcNAQkBFhdk ZXZlbEBnYWxlcmFjbHVzdGVyLmNvbTAeFw0yMTAxMjExMDMwMDZaFw0zMTAxMTkx MDMwMDZaMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UECAwHVXVzaW1hYTERMA8GA1UE BwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBPeTEVMBMGA1UECwwMR2Fs ZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9vdCBDQTEmMCQGCSqGSIb3DQEJ ARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB DwAwggEKAoIBAQDMIRmpl7aEqxnbnl6shlFTMvIUexTj15guXvRD5XHLxBcWgs7O cdb1gnZJWk+S1hlvxZZl3qVsxGbk2TPvbmDkCmJjm4NsT0OP8DRnjG+vrb+r04Q4 eItqSKryA5K/Tx1mWR6Fli5gwr8m054ZqJqAn84x8YlcoXswmOI4mbhTxP6H8S/S 
D14xlusZgDyZZJkZdC31zIsth1Z4+v7X5HzNvkSD1Rf0NkLtYsBZA57dTsiQ+pYr QNcrxa2GEisWqFgttQLNy2FIYNIoS4fr6hxDaGMaAopSdnHkMiIr6WMIDCb1SOC4 JQwAI29dpFlEfg2DIB+kkN6hd8cHvRi7idTTAgMBAAGjggEiMIIBHjAdBgNVHQ4E FgQUxBvY+4ary5U4O2s6Xm8a+E//SvAwgeEGA1UdIwSB2TCB1oAUxBvY+4ary5U4 O2s6Xm8a+E//SvChgaekgaQwgaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNp bWFhMREwDwYDVQQHDAhIZWxzaW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUw EwYDVQQLDAxHYWxlcmEgRGV2ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYw JAYJKoZIhvcNAQkBFhdkZXZlbEBnYWxlcmFjbHVzdGVyLmNvbYIUeSFiqOH8MAdf xA+Wb5QFN6tSvDMwDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAQYwDQYJKoZIhvcN AQELBQADggEBACt0h/vwwQLYcU5h3JQ/aQSayl1BpGyzwasKjcpjOut1peTHQG+J 0Znu/+YFhU+eTF8cbxBT4dAtHqOno+RVoDcroyDb+mdyN+5+j/ks+pN5aZ8GnsM9 IWx8fSqSu8LCU/AJt3hOwmPHRBThgBHupOBstycjZsh+s7mCt04CVC+O0ypwFe5j Fzq8bWed0rc/kfdqWTVcW3spo8OkSNV6BwPBa2UqjyrFcStCjZcBhLvw8K6qBfzC pVlycfrXpzz6zT+eMMLiiG4EE6LXogVWjvcpWELQ+56V3D47bCD3c7m/kWHVQ4U4 o/TH5K4g9Tkeuo6b77EystaG3HhuEaENJrM= -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/galera-server-2.pem000644 000164 177776 00000012411 15107057155 022203 0ustar00jenkinsnogroup000000 000000 Certificate: Data: Version: 3 (0x2) Serial Number: 09:75:89:09:2c:15:4a:59:e1:44:80:6c:65:1a:a1:a1 Signature Algorithm: sha256WithRSAEncryption Issuer: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=Galera Int/emailAddress=devel@galeracluster.com Validity Not Before: Jan 21 10:30:06 2021 GMT Not After : Jan 6 10:30:06 2024 GMT Subject: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=galera-server-2/emailAddress=devel@galeracluster.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:d7:bf:31:5f:29:58:d4:21:72:36:82:20:58:0e: 66:9b:21:44:1e:f3:d7:61:09:78:aa:bb:8e:b6:43: 1e:f7:8f:ce:5b:ab:0f:e0:61:b5:62:b2:16:f4:ba: 97:98:58:0f:fb:d1:c9:4e:2f:30:27:af:6a:62:82: 3a:cb:96:d2:45:35:97:c8:af:79:a7:63:0c:cb:71: 56:13:23:a9:c6:36:24:38:d6:e5:e8:69:0a:8a:bd: 
e9:41:ac:33:3b:3c:7e:d4:c2:ce:23:90:a2:55:e4: 70:01:8d:25:16:8a:75:d7:3f:d2:9e:42:b2:49:c8: 20:3a:71:36:ce:1d:5e:71:80:b9:96:6e:2c:d5:c5: e9:fa:55:70:59:3d:b9:cd:fa:b9:a0:f6:38:a1:f4: bf:2e:1f:08:4d:31:ad:ab:e2:e2:36:33:d2:f9:2b: 3a:6e:94:b4:1c:f1:68:b9:fe:10:7e:75:1b:0b:42: 72:fc:64:ad:06:9d:6e:cb:20:fc:5f:ad:86:42:26: cd:f0:39:e8:50:df:79:b7:1d:36:a1:c8:35:ae:4a: a0:f5:e5:c3:c0:2c:27:b6:e2:07:58:4f:19:bd:a2: c6:2f:ca:29:13:45:32:4a:3c:2d:02:99:15:b1:cd: 99:89:b5:c5:2f:10:1f:e0:a7:09:63:95:74:13:e0: 8a:75 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE X509v3 Subject Key Identifier: B9:CD:B9:3B:01:07:CE:D8:5E:37:96:1E:6B:A7:34:E0:78:60:73:16 X509v3 Authority Key Identifier: keyid:C7:1F:75:96:5D:5A:4A:CD:71:AB:39:78:3E:1D:80:2D:B4:13:C2:FF DirName:/C=FI/ST=Uusimaa/L=Helsinki/O=Codership Oy/OU=Galera Devel/CN=Galera Root CA/emailAddress=devel@galeracluster.com serial:92:CE:97:05:D3:DA:44:B3:CE:1E:D7:DA:F9:FE:E0:90 X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Subject Alternative Name: DNS:galera-server-2 Signature Algorithm: sha256WithRSAEncryption 95:f7:c7:6c:dc:2b:7f:b2:f9:d8:24:54:de:80:66:36:90:7c: 52:42:ea:47:86:af:0d:81:e3:19:e5:f8:6b:f7:c7:47:17:64: 57:0d:8e:30:20:f6:23:60:cd:4e:b3:e8:d7:be:b0:07:42:e0: 24:04:46:22:ec:6f:c8:4a:40:03:46:d3:08:08:1e:9f:47:1a: 8c:fa:ab:eb:2c:3b:9a:6c:8c:72:95:23:0f:55:dd:6b:0d:65: d2:4a:da:45:58:8f:e5:5c:48:b7:dc:4a:63:ba:58:8d:7b:37: 26:6e:4a:fe:9d:f3:c4:52:d8:e5:6c:1c:26:7f:6b:98:f0:b9: 51:7a:e8:d5:33:fc:b1:77:25:3a:ac:88:ab:23:81:d7:60:9c: 53:b2:9b:2b:72:62:ac:52:e9:8e:70:80:1a:ca:8e:54:d4:f8: 75:56:7d:36:9e:12:9a:33:58:35:4b:d0:ad:fd:52:b1:18:f0: cd:89:08:12:9c:65:a8:ea:ee:57:87:62:66:1c:f4:d7:40:4d: 5c:88:9b:fb:74:84:02:58:31:1a:cc:93:1c:86:bd:44:f7:6a: bc:24:07:01:f5:49:85:0d:a8:7a:dd:55:99:e7:35:be:68:8c: ee:88:28:4d:51:90:d5:2b:61:16:a1:a2:e2:7f:ea:e6:c6:6f: 6d:d3:43:a7 -----BEGIN CERTIFICATE----- MIIFBTCCA+2gAwIBAgIQCXWJCSwVSlnhRIBsZRqhoTANBgkqhkiG9w0BAQsFADCB 
nTELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDETMBEGA1UEAwwKR2FsZXJhIEludDEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2Fs ZXJhY2x1c3Rlci5jb20wHhcNMjEwMTIxMTAzMDA2WhcNMjQwMTA2MTAzMDA2WjCB ojELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDEYMBYGA1UEAwwPZ2FsZXJhLXNlcnZlci0yMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBANe/MV8pWNQhcjaCIFgOZpshRB7z12EJeKq7jrZDHvePzlurD+BhtWKyFvS6 l5hYD/vRyU4vMCevamKCOsuW0kU1l8iveadjDMtxVhMjqcY2JDjW5ehpCoq96UGs Mzs8ftTCziOQolXkcAGNJRaKddc/0p5CsknIIDpxNs4dXnGAuZZuLNXF6fpVcFk9 uc36uaD2OKH0vy4fCE0xravi4jYz0vkrOm6UtBzxaLn+EH51GwtCcvxkrQadbssg /F+thkImzfA56FDfebcdNqHINa5KoPXlw8AsJ7biB1hPGb2ixi/KKRNFMko8LQKZ FbHNmYm1xS8QH+CnCWOVdBPginUCAwEAAaOCATgwggE0MAkGA1UdEwQCMAAwHQYD VR0OBBYEFLnNuTsBB87YXjeWHmunNOB4YHMWMIHeBgNVHSMEgdYwgdOAFMcfdZZd WkrNcas5eD4dgC20E8L/oYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UECAwH VXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBP eTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9vdCBD QTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CEQCSzpcF 09pEs84e19r5/uCQMAsGA1UdDwQEAwIFoDAaBgNVHREEEzARgg9nYWxlcmEtc2Vy dmVyLTIwDQYJKoZIhvcNAQELBQADggEBAJX3x2zcK3+y+dgkVN6AZjaQfFJC6keG rw2B4xnl+Gv3x0cXZFcNjjAg9iNgzU6z6Ne+sAdC4CQERiLsb8hKQANG0wgIHp9H Goz6q+ssO5psjHKVIw9V3WsNZdJK2kVYj+VcSLfcSmO6WI17NyZuSv6d88RS2OVs HCZ/a5jwuVF66NUz/LF3JTqsiKsjgddgnFOymytyYqxS6Y5wgBrKjlTU+HVWfTae EpozWDVL0K39UrEY8M2JCBKcZajq7leHYmYc9NdATVyIm/t0hAJYMRrMkxyGvUT3 arwkBwH1SYUNqHrdVZnnNb5ojO6IKE1RkNUrYRahouJ/6ubGb23TQ6c= -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/my.cnf.3.tmpl000644 000164 177776 00000000141 15107057155 021030 0ustar00jenkinsnogroup000000 000000 wsrep_sst_method=mysqldump wsrep_sst_auth=root:rootpass wsrep_sst_receive_address=127.0.1.1:3313 galera-4-26.4.25/tests/conf/bundle-galera-server-1.pem000644 
000164 177776 00000006766 15107057155 023471 0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIFBjCCA+6gAwIBAgIRANX3xNMZ4WjEwWtKpUNiSpUwDQYJKoZIhvcNAQELBQAw gZ0xCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxEzARBgNVBAMMCkdhbGVyYSBJbnQxJjAkBgkqhkiG9w0BCQEWF2RldmVsQGdh bGVyYWNsdXN0ZXIuY29tMB4XDTIxMDEyMTEwMzAwNloXDTI0MDEwNjEwMzAwNlow gaIxCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxGDAWBgNVBAMMD2dhbGVyYS1zZXJ2ZXItMTEmMCQGCSqGSIb3DQEJARYXZGV2 ZWxAZ2FsZXJhY2x1c3Rlci5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQCm59B7kWp5wahhLZ6dFGiKsCygOryOVJKvGkMnpZOp5UsBXPMPGHLQSRTS vefDg7gHc1O3pqy+KNEb4ckrndwUqHRIYbqOeq4eM+yoxv2EHFBBFbnyUATnOoKd Yfjypyuu7UWuDjSwuy4oDTxFpy7WbtaL1zZiRRL1zK4G3xV1ZL330B3cGilfT0yd jBEKfEPJDepi41ZbY7p8t5KCNvArI8FQGgvcdZIc+wsUEN/PXE3XnGy382/Fn9ap Ym0DhlgszIqTCxqRZ4G9En5WqeXgP6EWh1aOEuSPNTTHXh30pF54YsNlaoWx7gTp pEqLPpyPwHJ5AwNKPkqZQzxJ0GHnAgMBAAGjggE4MIIBNDAJBgNVHRMEAjAAMB0G A1UdDgQWBBQUmjxdaRPYe+heki+wRTJHUvKkhTCB3gYDVR0jBIHWMIHTgBTHH3WW XVpKzXGrOXg+HYAttBPC/6GBp6SBpDCBoTELMAkGA1UEBhMCRkkxEDAOBgNVBAgM B1V1c2ltYWExETAPBgNVBAcMCEhlbHNpbmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAg T3kxFTATBgNVBAsMDEdhbGVyYSBEZXZlbDEXMBUGA1UEAwwOR2FsZXJhIFJvb3Qg Q0ExJjAkBgkqhkiG9w0BCQEWF2RldmVsQGdhbGVyYWNsdXN0ZXIuY29tghEAks6X BdPaRLPOHtfa+f7gkDALBgNVHQ8EBAMCBaAwGgYDVR0RBBMwEYIPZ2FsZXJhLXNl cnZlci0xMA0GCSqGSIb3DQEBCwUAA4IBAQCkrliX0wZ/+FZSbLBW8de2Z7h6H2UC r0Uc/DtdVWRAW1xR3TomZEo4LP6rdBK18grD6Fg/FhW4l2KlNNHoS2n4fOWv9wBn L0NQ65RYfxBzYyq5XB0nEN2Y6FBTJFowTrr02zMlnw81kpDWU+FbdMu9EwsuTNG/ Nmi7cP8e2Iefd1TQ2RUEl+WXJZLv5upLEnRupcl+Rar5PuR+ZaNo7tES5zNeZ8/e SNDAwNBFAO+oCDDG/z4JiK15muw14KW9PTedMxHwX4T76BhntcsmbyXBGy79wtKY 0DxUFdEdBXPFLb4A78/nUwpPt1Em+bgxUFb56velOq3Kkwgdky3TKazo -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIE7zCCA9egAwIBAgIRAJLOlwXT2kSzzh7X2vn+4JAwDQYJKoZIhvcNAQELBQAw 
gaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTAeFw0yMTAxMjExMDMwMDZaFw0yNDAxMDYxMDMw MDZaMIGdMQswCQYDVQQGEwJGSTEQMA4GA1UECAwHVXVzaW1hYTERMA8GA1UEBwwI SGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBPeTEVMBMGA1UECwwMR2FsZXJh IERldmVsMRMwEQYDVQQDDApHYWxlcmEgSW50MSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBAM67vDBE4ALnYbfDKtAphdd/rFSmtiaOmZcl58RAPpaaQQ/jxLwNvVgsaCh/ 2boWBXhGNCzB175sxcK2XkKEn9MHENARKDzfdFoIQjPqQyZxX6CupYOdnA/B9+l/ +6uW7Iu+N6UP+IUeW/ElWzbIh5k/mpzOr16r/MCmxD9dvB7i1C5+bvXB6lasN2ot tldw6cQtrzGBmYRl3f/hUq3j9gwrPm3SVfuoEPCoUetobgUZbKdi0jHuYRi4y60T +Vn7Xx1fcPz9qwyWJHtw9ERr/6KWhxOk9+rxiUmNIah337p+nOe282lCmnI+hMLo CSeyjlPVzIxQY5KjTFW4zu6wTWkCAwEAAaOCASIwggEeMAwGA1UdEwQFMAMBAf8w HQYDVR0OBBYEFMcfdZZdWkrNcas5eD4dgC20E8L/MIHhBgNVHSMEgdkwgdaAFMQb 2PuGq8uVODtrOl5vGvhP/0rwoYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UE CAwHVXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hp cCBPeTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9v dCBDQTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CFHkh Yqjh/DAHX8QPlm+UBTerUrwzMAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOC AQEAaJuugEguz+T4V8umj0zAv/yfWOw+647scCJpfF1P2q2GSTDGG3LkjrDuKMK+ zp0hAHdXhDD8XDOh2q6eMbUTCVUFqnM6ss4os/HyK5f5UCv3gJaSXJm3GAGgmZze HsdDPRTePp1Mr21EjChsWEUAb6EKA5F6ezUVglFwi4uD00FNYA/If7mbizomohS9 JvXPhriy9cB1jaLgP5UOl1tT3CJUjNY2Jdk3RKOIEthUaHxY7xnJVLGrLQrMBjG7 dKPuxucxvpKBAMhqnLdNVBYg3wb7WAzAMVedUrJWpXaWzWIsjLtadt6M3kCwYKtS h3vB5D/rqXSP0pdm50ihb3yRHA== -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/galera-server-1.key000644 000164 177776 00000003250 15107057155 022212 0ustar00jenkinsnogroup000000 000000 -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCm59B7kWp5wahh LZ6dFGiKsCygOryOVJKvGkMnpZOp5UsBXPMPGHLQSRTSvefDg7gHc1O3pqy+KNEb 
4ckrndwUqHRIYbqOeq4eM+yoxv2EHFBBFbnyUATnOoKdYfjypyuu7UWuDjSwuy4o DTxFpy7WbtaL1zZiRRL1zK4G3xV1ZL330B3cGilfT0ydjBEKfEPJDepi41ZbY7p8 t5KCNvArI8FQGgvcdZIc+wsUEN/PXE3XnGy382/Fn9apYm0DhlgszIqTCxqRZ4G9 En5WqeXgP6EWh1aOEuSPNTTHXh30pF54YsNlaoWx7gTppEqLPpyPwHJ5AwNKPkqZ QzxJ0GHnAgMBAAECggEAY371jXQWYRbM5YFoeINd/q8fKzVYihBokPG67ruJ11HU 8K8URf9xEjE+tJJ4wtiWr/pUAbreZ021ukVSwymLtNTm3O9FYwJHIcIaZlKZdlPa k7H78zM4eQ8sJd3tpdl59QGE9EmLp38t+LpLH8qV5UaVpy9YQCSlNNobNAVwqVW5 0KrxIM4tITyuvIbmn7JWkeaWi2NZjYNus80K5uEve5tYGzchA6ejuCw082pi4pHk bvl3b381l3OCQyQIegeRMp1aN3xUKG/dCW+iaHi/+io40NJQEI/n35ExLLo9xYts PBQACnPQ1JiJ2ktXVa9Rvm7KIKKHravHh31sjzUxoQKBgQDdcTDBG7YkOU8hcb/+ 1ggtGQrIeHTuhXwO5WUhNL5wqrbi97VpSji+fa5wYLULDAtX2VdG98gyPoNk324A u8LSGka/gqvj8UhV9Pgg9m38aHfsfSvy3Cvsgh+Kgrkia8HgQKqixk3KPvcK/jOs G+macG7lOfBiSh/5DXNhW8v/owKBgQDA89ZrU4rMnaQDD463p/UwDtXCQooNPuYL Nnmi12QQfn5M8A2j48ZbvFdX5FnS8rnw+zebYMVTct6GCEuGDuLgdXOLjMcJTVkj s0LVyYco9RQ/Vq7CiFvC0hPMnihpsyDqUFsUIRDdDGtjX3483zVMDR/1GNO4nXBM OutgSSPo7QKBgDJI28teIZREN0Xe8LxLmfuzrhEr2VG4mh0/1iEeOwiWm4UavuUz 54LKQ2xdaX8iswi13+3LonhWXbvwSKh9+pV/RgWDBMl0Jvrt79J8YyloB6N6IRq7 CHZibgsj+Cpq0dG8nYLtCQkeFzc11kIE7J0XXvdKmt2W+3+woDzBfaeFAoGBAKmv RieIho2/LFdMvTOhzyv+P9ngbkuLvv6pX3rndKZUt3mtujEgxHY0QZOXy66Jqx/T rWlFRwNGB49TiCdSvA6s+3oDyZ3Smcudf+9GlUhdjvrMnk+RXzELUkIl7GBYGWA2 XoJrOctLer0fY4LgwJwrBqmitb6rJfDD9+7rI1tZAoGBAKKEOygxVoB+2o3dLpua QsoeygGZG+IWeyJTERF/dOf3g0t36I/liw7eDuOEuZgMs83m2FIbmS+ycdOcVDmR pvjCpRMwd3J9YD21/ej+daoQaGrdA0XX1GAYWKmQlwl3YDhOQ/U9x7k/UxBAzb3f ZsxB8hqvPnwd5NGEhztBCl0r -----END PRIVATE KEY----- galera-4-26.4.25/tests/conf/galera-server-3.key000644 000164 177776 00000003250 15107057155 022214 0ustar00jenkinsnogroup000000 000000 -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC0g8LsqJHoMasH BZeVydaCoY5HlkOD/PjJ5HPnuJ1hIiBEZbWPWcg9saWE93x5KyO03KqgsqIt2aRG /UwoYYO1ZzFwgT8VoDgZ6FBHgjdPBtDy21fvsblEL91wqLZEMWlTcM12JKs6Nb5o I4wwUirVRfIWXGK9urzw2FK2lx84mcKx0hy/z3Z3EAzzaFPooqiciWvSYn7W1Qpz 
1r/zNdkgnOxeTcEIaC+frOYiSfgxmXgfc+GSE6d5BcG4kb77JaA9rE+oM79Tbl0N g+ID2QPS8t+z3rEutaEOJVIVreGbdkZ8+cCHAASMqDLxOV9u86EFNm2rgiEPzRg3 7K3ySLzFAgMBAAECggEAMnuGz9h0tZLuWZBezC0eKHo16B9F0mu0yAFzyKIAoWGB 1J79H5HkRhygRYdQ8DH4UmRD850BbgEnyBKeccyToO+zC9uZYNrl1Zj6moT4Ismt Nb3R4d66CS+5pgAIuRl4czVgwGGQ4T6WeVk3o2vXPr3I7if4FUdb/57/wsCQoqsl WyEz7OUuM+6nI4cTnHO2fSctJlhDo6WCEoKHtLmwvgVJrCSJYaPFWdn7rzuKyZMY n/55E1crOSn3f+Wnm0x17Lv/vFXsBS9dYctj0mx17oMoFPePc/6O6MK4Fa3nojN8 0vrKCVwz9CBxbYow0B6DyiWFZn68Z3LeiPyqOBgZAQKBgQDb1/90IB+mU+ay1B48 ztL4g0xA5htxK1gz+HoH0niQ9u/OjzJYNwl24N61UZnmqygHMIQhV+Cq3bBDCfiP Roh/EG7ue2tws8IcadYQIXUTvSK9zcJ2JyfH/Qx4/llFPYPQQ8kdb5ZWNTve6TSk vREiNrmwMZhFqaxdiDak60IM6QKBgQDSM+ggU1oQX4vA5N+czmUoNW4wqAogY0lb V6b/GB2jdbAjQ622uirjPYeauq97ReLUAUZV741n38JSaAR3GFeb9stz1MNGFoTF mH55hvtXvWkDuhbOJL8V3R1OTXEH3GUfT1H6Tb5rLiEMbuHG9ETF+gxVnLgXbfW8 pNPTUzOXfQKBgQCUWyT2dp8lHUV3tJFbM69HKUOSIDawnjF9kNa45J2cJigaWqP8 x9dJM+LWtWSIN0Rh5amk3qqsY8II23ezKEQAi1Rw9zS/7260we9FH27kjSQetfXe yfmcifWayPnxMdv79WKIzL7FGlN20CVIbpZbYlbYwf8iM6gvzjYiAOin0QKBgClN M6XuWrru2xnanqlD0JJ29SCwU9ULTSWacmCbD2/HtwUmziIiMD0YIOXhGovBLpFz cqt99y3axGbGs2HnMYeelVk8C+ZglFPy457jS81wlQq7bLGyfK1CFfkFtFjUEDZ3 smbZEbojhTDZNJmP81dnCzdjJLTN0UPwqwJRzchNAoGAIb/BHjS2jptloFlR6wQf 6uReMpvYLTleBYp+j44UNqWG/Sj06pF4qFWqdN4Lflxk8BlfpZcBilp/Juc6StF7 yzl5vFDAfPo7uFUVrWhGqPbPtemnyFeZLnFBCHIORYJPCM3SEe/StHIg+GGKT2gy 4L1loGk3pJQdHTc7MDoNmBE= -----END PRIVATE KEY----- galera-4-26.4.25/tests/conf/gen-cert-chain.sh000755 000164 177776 00000004360 15107057155 021731 0ustar00jenkinsnogroup000000 000000 #!/usr/bin/env bash # # Copyright (C) 2021 Codership Oy # # Helper script to generate certificate chains for testing. 
# set -o errexit -o errtrace -o nounset set -o xtrace export EASYRSA_REQ_ORG="Codership Oy" export EASYRSA_REQ_EMAIL="devel@galeracluster.com" export EASYRSA_REQ_OU="Galera Devel" export EASYRSA_REQ_CITY="Helsinki" export EASYRSA_REQ_PROVINCE="Uusimaa" export EASYRSA_REQ_COUNTRY="FI" export EASYRSA_DN=org export EASYRSA_BATCH=yes # Init pki directory for root CA easyrsa --pki-dir=rootCA init-pki # Create root CA in pki/ca.crt easyrsa --pki-dir=rootCA --req-cn="Galera Root CA" build-ca nopass # Initialize pki directory for intermediate CA easyrsa --pki-dir=intCA init-pki easyrsa --pki-dir=intCA build-ca nopass subca # Create request for intermediate CA easyrsa --pki-dir=intCA --req-cn="Galera Int" gen-req ca nopass mkdir -p intCA/x509-types # Write custom server type for server certificates without # extendedKeyUsage. cat < intCA/x509-types/server basicConstraints = CA:FALSE subjectKeyIdentifier = hash authorityKeyIdentifier = keyid,issuer:always # extendedKeyUsage = serverAuth keyUsage = digitalSignature,keyEncipherment EOF # Copy request under rootCA cp intCA/reqs/ca.req rootCA/reqs/galera-int.req # Create intermediate CA in pki/issued/galear-int.crt easyrsa --pki-dir=rootCA sign-req ca galera-int # Copy generated intermediate CA under intCA cp rootCA/issued/galera-int.crt intCA/ca.crt # Create server certificates using intermediate CA for i in galera-server-1 galera-server-2 galera-server-3 do easyrsa --pki-dir=intCA --req-cn=$i gen-req $i nopass easyrsa --pki-dir=intCA sign-req server $i done cp rootCA/ca.crt galera-ca.pem cp intCA/ca.crt galera-int.pem find intCA/issued -name "*.crt" -exec sh -c 'x="{}"; cp "$x" "$(basename $x .crt).pem"' \; find intCA/private -name "*.key" -exec cp {} . 
\; # Validate generated certificates openssl verify -CAfile galera-ca.pem galera-int.pem for i in galera-server-1.pem galera-server-2.pem galera-server-3.pem do openssl verify -CAfile galera-ca.pem -untrusted galera-int.pem $i openssl x509 -in $i > bundle-$i openssl x509 -in galera-int.pem >> bundle-$i openssl crl2pkcs7 -nocrl -certfile bundle-$i | openssl pkcs7 -print_certs -noout done galera-4-26.4.25/tests/conf/galera_ca.pem000644 000164 177776 00000002436 15107057155 021211 0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIDmzCCAoOgAwIBAgIUThWEH6e2r32TxCzqAFw7qlTe4IUwDQYJKoZIhvcNAQEL BQAwXTELMAkGA1UEBhMCRkkxETAPBgNVBAgMCEhlbHNpbmtpMREwDwYDVQQHDAhI ZWxzaW5raTESMBAGA1UECgwJQ29kZXJzaGlwMRQwEgYDVQQDDAtHYWxlcmEgUm9v dDAeFw0yMTAyMjgxOTQ4NTNaFw0yMzExMjUxOTQ4NTNaMF0xCzAJBgNVBAYTAkZJ MREwDwYDVQQIDAhIZWxzaW5raTERMA8GA1UEBwwISGVsc2lua2kxEjAQBgNVBAoM CUNvZGVyc2hpcDEUMBIGA1UEAwwLR2FsZXJhIFJvb3QwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQCqPInsAAcnnYLQXXUfqSyq/TVm90ZqLiHYDhzArwg/ Kq6L54ulD6zoFWp5jkTzA/DdZRkxeY7rYp8xPuI8PFCaXr6+HYV7Ft7SPZbdtBYL dimhH0XqQkFgR1jysSbRzlcUOSBekJFD5DzYMoK1rpHPmta6ZF7+QRy206/sNf4P vRAhJXeFIe8qUnuZEvVFAAjonVfHyR0YNFtvRrpHlqH9jBLN47qFdlQWSck3v3O1 wmjVNvPLkNLXfCf62+uzlXaztxKaLQiisO5ur4k1ryvWmZfttGVtORtsGDnU2Fzw dqdxKnKRoxoUFAlmQWnGFjigJiWbFwN7fvC7ARrxQ83JAgMBAAGjUzBRMB0GA1Ud DgQWBBTH4oT4mC2UZSn7U4EtroBfTvZbujAfBgNVHSMEGDAWgBTH4oT4mC2UZSn7 U4EtroBfTvZbujAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAR bid7qDGJ4HMlnHGmh8YdgYJrB6ChYvuVvcDSMMomHhlWuJUXplbjVBwi13cDQhOk IVGE7+zCW/wRxUyoA+2Z/YDJ5ekGdwav4zTOQ8XGD0aASxdkrfJLQZ+DGTqv43gL nRe7/MOJTtzCybRJRN8NMQysfukVr5qENoxnMGS40knJ/mV9mkb/du9nXi1OH21t y6kPjVC6axWOrbbaObC/d26QfD3V+t8sPu0q3PrAYhmgVYNUCuUV94iwlpDsn/9A TYQWE6v1JGDavX5T9S19nw6tsp2gmHy1bjDyTMfDDPnZ+1QQ7m0BbuKghnyh6qTL tIKBtDgvcmxl8bdsaTgB -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/nodes.conf.tmpl000644 000164 177776 00000003576 15107057155 021550 0ustar00jenkinsnogroup000000 000000 # # Nodes specific configuration # # 
Symbolic node ID - for output, for future use shoudl be unique declare -a NODE_ID # Location of the node. "local" - means this machine, anything else # is interpreted as SSH arguments declare -a NODE_LOCATION # Absolute path to where to unpack the distribution declare -a NODE_TEST_DIR # Address for incoming requests declare -a NODE_INCOMING_HOST declare -a NODE_INCOMING_PORT # Address for group communicaiton declare -a NODE_GCS_HOST declare -a NODE_GCS_PORT # Optional server configuration file to be copied to node declare -a NODE_MY_CNF declare -a NODE_PG_CNF # 1st node idx=0 NODE_ID[$idx]="home" NODE_INCOMING_HOST[$idx]=127.0.0.1 NODE_INCOMING_PORT[$idx]=$(( $DBMS_PORT - 1 )) NODE_GCS_HOST[$idx]=192.168.0.1 NODE_GCS_PORT[$idx]=$GCS_PORT NODE_LOCATION[$idx]="local" NODE_TEST_DIR[$idx]=/tmp/galera NODE_MY_CNF[$idx]="$TEST_BASE/conf/my.cnf" # 2nd node idx=$(( $idx + 1 )) NODE_ID[$idx]="bernhard" NODE_INCOMING_HOST[$idx]=192.168.0.3 NODE_INCOMING_PORT[$idx]=$DBMS_PORT NODE_GCS_HOST[$idx]=${NODE_INCOMING_HOST[$idx]} NODE_GCS_PORT[$idx]=$GCS_PORT NODE_LOCATION[$idx]="alex@${NODE_INCOMING_HOST[$idx]}" NODE_TEST_DIR[$idx]=/home/alex/codership/galera NODE_MY_CNF[$idx]="$TEST_BASE/conf/my.cnf" # 3rd node #idx=$(( $idx + 1 )) #NODE_ID[$idx]="bulldog" #NODE_INCOMING_HOST[$idx]=192.168.0.12 #NODE_INCOMING_PORT[$idx]=$DBMS_PORT #NODE_GCS_HOST[$idx]=${NODE_INCOMING_HOST[$idx]} #NODE_GCS_PORT[$idx]=$GCS_PORT #NODE_LOCATION[$idx]="alex@${NODE_INCOMING_HOST[$idx]}" #NODE_TEST_DIR[$idx]=/home/alex/codership/galera #NODE_MY_CNF[$idx]="$TEST_BASE/conf/my.cnf" declare -xr NODE_MAX=$idx declare -xr NODE_LIST=$(seq 0 $NODE_MAX) declare -xr NODE_ID declare -xr NODE_LOCATION declare -xr NODE_TEST_DIR declare -xr NODE_INCOMING_HOST declare -xr NODE_INCOMING_PORT declare -xr NODE_GCS_HOST declare -xr NODE_GCS_PORT declare -xr NODE_MY_CNF declare -xr NODE_PG_CNF # galera-4-26.4.25/tests/conf/bundle-galera-server-2.pem000644 000164 177776 00000006766 15107057155 023472 
0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIFBTCCA+2gAwIBAgIQCXWJCSwVSlnhRIBsZRqhoTANBgkqhkiG9w0BAQsFADCB nTELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDETMBEGA1UEAwwKR2FsZXJhIEludDEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2Fs ZXJhY2x1c3Rlci5jb20wHhcNMjEwMTIxMTAzMDA2WhcNMjQwMTA2MTAzMDA2WjCB ojELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDEYMBYGA1UEAwwPZ2FsZXJhLXNlcnZlci0yMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBANe/MV8pWNQhcjaCIFgOZpshRB7z12EJeKq7jrZDHvePzlurD+BhtWKyFvS6 l5hYD/vRyU4vMCevamKCOsuW0kU1l8iveadjDMtxVhMjqcY2JDjW5ehpCoq96UGs Mzs8ftTCziOQolXkcAGNJRaKddc/0p5CsknIIDpxNs4dXnGAuZZuLNXF6fpVcFk9 uc36uaD2OKH0vy4fCE0xravi4jYz0vkrOm6UtBzxaLn+EH51GwtCcvxkrQadbssg /F+thkImzfA56FDfebcdNqHINa5KoPXlw8AsJ7biB1hPGb2ixi/KKRNFMko8LQKZ FbHNmYm1xS8QH+CnCWOVdBPginUCAwEAAaOCATgwggE0MAkGA1UdEwQCMAAwHQYD VR0OBBYEFLnNuTsBB87YXjeWHmunNOB4YHMWMIHeBgNVHSMEgdYwgdOAFMcfdZZd WkrNcas5eD4dgC20E8L/oYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UECAwH VXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBP eTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9vdCBD QTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CEQCSzpcF 09pEs84e19r5/uCQMAsGA1UdDwQEAwIFoDAaBgNVHREEEzARgg9nYWxlcmEtc2Vy dmVyLTIwDQYJKoZIhvcNAQELBQADggEBAJX3x2zcK3+y+dgkVN6AZjaQfFJC6keG rw2B4xnl+Gv3x0cXZFcNjjAg9iNgzU6z6Ne+sAdC4CQERiLsb8hKQANG0wgIHp9H Goz6q+ssO5psjHKVIw9V3WsNZdJK2kVYj+VcSLfcSmO6WI17NyZuSv6d88RS2OVs HCZ/a5jwuVF66NUz/LF3JTqsiKsjgddgnFOymytyYqxS6Y5wgBrKjlTU+HVWfTae EpozWDVL0K39UrEY8M2JCBKcZajq7leHYmYc9NdATVyIm/t0hAJYMRrMkxyGvUT3 arwkBwH1SYUNqHrdVZnnNb5ojO6IKE1RkNUrYRahouJ/6ubGb23TQ6c= -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIE7zCCA9egAwIBAgIRAJLOlwXT2kSzzh7X2vn+4JAwDQYJKoZIhvcNAQELBQAw gaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz 
aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTAeFw0yMTAxMjExMDMwMDZaFw0yNDAxMDYxMDMw MDZaMIGdMQswCQYDVQQGEwJGSTEQMA4GA1UECAwHVXVzaW1hYTERMA8GA1UEBwwI SGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBPeTEVMBMGA1UECwwMR2FsZXJh IERldmVsMRMwEQYDVQQDDApHYWxlcmEgSW50MSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBAM67vDBE4ALnYbfDKtAphdd/rFSmtiaOmZcl58RAPpaaQQ/jxLwNvVgsaCh/ 2boWBXhGNCzB175sxcK2XkKEn9MHENARKDzfdFoIQjPqQyZxX6CupYOdnA/B9+l/ +6uW7Iu+N6UP+IUeW/ElWzbIh5k/mpzOr16r/MCmxD9dvB7i1C5+bvXB6lasN2ot tldw6cQtrzGBmYRl3f/hUq3j9gwrPm3SVfuoEPCoUetobgUZbKdi0jHuYRi4y60T +Vn7Xx1fcPz9qwyWJHtw9ERr/6KWhxOk9+rxiUmNIah337p+nOe282lCmnI+hMLo CSeyjlPVzIxQY5KjTFW4zu6wTWkCAwEAAaOCASIwggEeMAwGA1UdEwQFMAMBAf8w HQYDVR0OBBYEFMcfdZZdWkrNcas5eD4dgC20E8L/MIHhBgNVHSMEgdkwgdaAFMQb 2PuGq8uVODtrOl5vGvhP/0rwoYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UE CAwHVXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hp cCBPeTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9v dCBDQTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CFHkh Yqjh/DAHX8QPlm+UBTerUrwzMAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOC AQEAaJuugEguz+T4V8umj0zAv/yfWOw+647scCJpfF1P2q2GSTDGG3LkjrDuKMK+ zp0hAHdXhDD8XDOh2q6eMbUTCVUFqnM6ss4os/HyK5f5UCv3gJaSXJm3GAGgmZze HsdDPRTePp1Mr21EjChsWEUAb6EKA5F6ezUVglFwi4uD00FNYA/If7mbizomohS9 JvXPhriy9cB1jaLgP5UOl1tT3CJUjNY2Jdk3RKOIEthUaHxY7xnJVLGrLQrMBjG7 dKPuxucxvpKBAMhqnLdNVBYg3wb7WAzAMVedUrJWpXaWzWIsjLtadt6M3kCwYKtS h3vB5D/rqXSP0pdm50ihb3yRHA== -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/dummy.conf000644 000164 177776 00000000077 15107057155 020611 0ustar00jenkinsnogroup000000 000000 # Script arguments t/dummy.sh arg1 arg2 t/dummy.sh arg3 arg4 galera-4-26.4.25/tests/conf/galera-server-1.pem000644 000164 177776 00000012411 15107057155 022202 0ustar00jenkinsnogroup000000 000000 Certificate: Data: Version: 3 (0x2) Serial Number: 
d5:f7:c4:d3:19:e1:68:c4:c1:6b:4a:a5:43:62:4a:95 Signature Algorithm: sha256WithRSAEncryption Issuer: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=Galera Int/emailAddress=devel@galeracluster.com Validity Not Before: Jan 21 10:30:06 2021 GMT Not After : Jan 6 10:30:06 2024 GMT Subject: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=galera-server-1/emailAddress=devel@galeracluster.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:a6:e7:d0:7b:91:6a:79:c1:a8:61:2d:9e:9d:14: 68:8a:b0:2c:a0:3a:bc:8e:54:92:af:1a:43:27:a5: 93:a9:e5:4b:01:5c:f3:0f:18:72:d0:49:14:d2:bd: e7:c3:83:b8:07:73:53:b7:a6:ac:be:28:d1:1b:e1: c9:2b:9d:dc:14:a8:74:48:61:ba:8e:7a:ae:1e:33: ec:a8:c6:fd:84:1c:50:41:15:b9:f2:50:04:e7:3a: 82:9d:61:f8:f2:a7:2b:ae:ed:45:ae:0e:34:b0:bb: 2e:28:0d:3c:45:a7:2e:d6:6e:d6:8b:d7:36:62:45: 12:f5:cc:ae:06:df:15:75:64:bd:f7:d0:1d:dc:1a: 29:5f:4f:4c:9d:8c:11:0a:7c:43:c9:0d:ea:62:e3: 56:5b:63:ba:7c:b7:92:82:36:f0:2b:23:c1:50:1a: 0b:dc:75:92:1c:fb:0b:14:10:df:cf:5c:4d:d7:9c: 6c:b7:f3:6f:c5:9f:d6:a9:62:6d:03:86:58:2c:cc: 8a:93:0b:1a:91:67:81:bd:12:7e:56:a9:e5:e0:3f: a1:16:87:56:8e:12:e4:8f:35:34:c7:5e:1d:f4:a4: 5e:78:62:c3:65:6a:85:b1:ee:04:e9:a4:4a:8b:3e: 9c:8f:c0:72:79:03:03:4a:3e:4a:99:43:3c:49:d0: 61:e7 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE X509v3 Subject Key Identifier: 14:9A:3C:5D:69:13:D8:7B:E8:5E:92:2F:B0:45:32:47:52:F2:A4:85 X509v3 Authority Key Identifier: keyid:C7:1F:75:96:5D:5A:4A:CD:71:AB:39:78:3E:1D:80:2D:B4:13:C2:FF DirName:/C=FI/ST=Uusimaa/L=Helsinki/O=Codership Oy/OU=Galera Devel/CN=Galera Root CA/emailAddress=devel@galeracluster.com serial:92:CE:97:05:D3:DA:44:B3:CE:1E:D7:DA:F9:FE:E0:90 X509v3 Key Usage: Digital Signature, Key Encipherment X509v3 Subject Alternative Name: DNS:galera-server-1 Signature Algorithm: sha256WithRSAEncryption a4:ae:58:97:d3:06:7f:f8:56:52:6c:b0:56:f1:d7:b6:67:b8: 
7a:1f:65:02:af:45:1c:fc:3b:5d:55:64:40:5b:5c:51:dd:3a: 26:64:4a:38:2c:fe:ab:74:12:b5:f2:0a:c3:e8:58:3f:16:15: b8:97:62:a5:34:d1:e8:4b:69:f8:7c:e5:af:f7:00:67:2f:43: 50:eb:94:58:7f:10:73:63:2a:b9:5c:1d:27:10:dd:98:e8:50: 53:24:5a:30:4e:ba:f4:db:33:25:9f:0f:35:92:90:d6:53:e1: 5b:74:cb:bd:13:0b:2e:4c:d1:bf:36:68:bb:70:ff:1e:d8:87: 9f:77:54:d0:d9:15:04:97:e5:97:25:92:ef:e6:ea:4b:12:74: 6e:a5:c9:7e:45:aa:f9:3e:e4:7e:65:a3:68:ee:d1:12:e7:33: 5e:67:cf:de:48:d0:c0:c0:d0:45:00:ef:a8:08:30:c6:ff:3e: 09:88:ad:79:9a:ec:35:e0:a5:bd:3d:37:9d:33:11:f0:5f:84: fb:e8:18:67:b5:cb:26:6f:25:c1:1b:2e:fd:c2:d2:98:d0:3c: 54:15:d1:1d:05:73:c5:2d:be:00:ef:cf:e7:53:0a:4f:b7:51: 26:f9:b8:31:50:56:f9:ea:f7:a5:3a:ad:ca:93:08:1d:93:2d: d3:29:ac:e8 -----BEGIN CERTIFICATE----- MIIFBjCCA+6gAwIBAgIRANX3xNMZ4WjEwWtKpUNiSpUwDQYJKoZIhvcNAQELBQAw gZ0xCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxEzARBgNVBAMMCkdhbGVyYSBJbnQxJjAkBgkqhkiG9w0BCQEWF2RldmVsQGdh bGVyYWNsdXN0ZXIuY29tMB4XDTIxMDEyMTEwMzAwNloXDTI0MDEwNjEwMzAwNlow gaIxCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxGDAWBgNVBAMMD2dhbGVyYS1zZXJ2ZXItMTEmMCQGCSqGSIb3DQEJARYXZGV2 ZWxAZ2FsZXJhY2x1c3Rlci5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK AoIBAQCm59B7kWp5wahhLZ6dFGiKsCygOryOVJKvGkMnpZOp5UsBXPMPGHLQSRTS vefDg7gHc1O3pqy+KNEb4ckrndwUqHRIYbqOeq4eM+yoxv2EHFBBFbnyUATnOoKd Yfjypyuu7UWuDjSwuy4oDTxFpy7WbtaL1zZiRRL1zK4G3xV1ZL330B3cGilfT0yd jBEKfEPJDepi41ZbY7p8t5KCNvArI8FQGgvcdZIc+wsUEN/PXE3XnGy382/Fn9ap Ym0DhlgszIqTCxqRZ4G9En5WqeXgP6EWh1aOEuSPNTTHXh30pF54YsNlaoWx7gTp pEqLPpyPwHJ5AwNKPkqZQzxJ0GHnAgMBAAGjggE4MIIBNDAJBgNVHRMEAjAAMB0G A1UdDgQWBBQUmjxdaRPYe+heki+wRTJHUvKkhTCB3gYDVR0jBIHWMIHTgBTHH3WW XVpKzXGrOXg+HYAttBPC/6GBp6SBpDCBoTELMAkGA1UEBhMCRkkxEDAOBgNVBAgM B1V1c2ltYWExETAPBgNVBAcMCEhlbHNpbmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAg T3kxFTATBgNVBAsMDEdhbGVyYSBEZXZlbDEXMBUGA1UEAwwOR2FsZXJhIFJvb3Qg 
Q0ExJjAkBgkqhkiG9w0BCQEWF2RldmVsQGdhbGVyYWNsdXN0ZXIuY29tghEAks6X BdPaRLPOHtfa+f7gkDALBgNVHQ8EBAMCBaAwGgYDVR0RBBMwEYIPZ2FsZXJhLXNl cnZlci0xMA0GCSqGSIb3DQEBCwUAA4IBAQCkrliX0wZ/+FZSbLBW8de2Z7h6H2UC r0Uc/DtdVWRAW1xR3TomZEo4LP6rdBK18grD6Fg/FhW4l2KlNNHoS2n4fOWv9wBn L0NQ65RYfxBzYyq5XB0nEN2Y6FBTJFowTrr02zMlnw81kpDWU+FbdMu9EwsuTNG/ Nmi7cP8e2Iefd1TQ2RUEl+WXJZLv5upLEnRupcl+Rar5PuR+ZaNo7tES5zNeZ8/e SNDAwNBFAO+oCDDG/z4JiK15muw14KW9PTedMxHwX4T76BhntcsmbyXBGy79wtKY 0DxUFdEdBXPFLb4A78/nUwpPt1Em+bgxUFb56velOq3Kkwgdky3TKazo -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/galera_cert.pem000644 000164 177776 00000002214 15107057155 021555 0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIDLjCCAhYCAQEwDQYJKoZIhvcNAQELBQAwXTELMAkGA1UEBhMCRkkxETAPBgNV BAgMCEhlbHNpbmtpMREwDwYDVQQHDAhIZWxzaW5raTESMBAGA1UECgwJQ29kZXJz aGlwMRQwEgYDVQQDDAtHYWxlcmEgUm9vdDAeFw0yMTAyMjgxOTQ4NTNaFw0yMzEx MjUxOTQ4NTNaMF0xCzAJBgNVBAYTAkZJMREwDwYDVQQIDAhIZWxzaW5raTERMA8G A1UEBwwISGVsc2lua2kxEjAQBgNVBAoMCUNvZGVyc2hpcDEUMBIGA1UEAwwLR2Fs ZXJhIE5vZGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHrDam1TsH QPm+E7tZcI8dGpa0wTooKh3WWPRIjcpOuDXlV8cxyK3YZAmquAT4iWcJQZZq8t+/ LrpGK5i9sQl8dAjLftJKIRquEdwO027xF0hsqPiDMvfbBoy8af78tf3d3AFvq+Sw L5jLnTOYkAqEYXbK6m64/mAN0npyrqX9ULsPQkQggnbZYYJfRa3V3I2QfLMlFD7Y 8oy5fZTKOUOf8F1neCGNlMtGXWl3I3CW13uVyS2JmZSmI+iPtxmFOunLK+ymfmmH +6/v2EGOPZ2JNbQ1jLFPlLMfgKamSPzkI0U0jAsomFtiJqUceJFdl+WAQXKc7OUW yd5rGYvRJmGdAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEw6yadB3SwUzRQsPkSs BhlpbbukSLwWXomvVIy6eTWGCbOxA5RsXaiiNbnGY1DfYsRZJnJjZzTdQ7KWWIIC 4SB2AxOGu7GyfQFXLF8JcNJ3tTwsrQO4s1Rv3ZSVrtymOjyRXgzAWX/chek4cBzB AzqbI6PdIDDE7JEWJgj71q/pksUvaefviXrINQfkOo8PWvVWUH1Gw+dUi9Jj3JQd CrMzFQBoVgzzICRjxZoG1XtljMeoepqpbMwjp589JUNRyAmvy1yrMnxablLdLPdO zehLvs2rVXYo3vK8MAHz4OLa41Uj/0KsKhKdFgWrxGoe1mYR58wy3+Yni8g+1hbt k64= -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/README.md000644 000164 177776 00000001646 15107057155 020071 0ustar00jenkinsnogroup000000 000000 Certificates ============ Note: These 
certificates should be used for testing purposes only. Certificate and key files in this directory: Simple key and certificate -------------------------- This key and certificate must be identical on each node: - galera_key.pem/galera_cert.pem - standalone key/certificate which can be used for testing in symmetric setups Certificate Chain ----------------- Keys and certificates below have been created by using easy-rsa 3 CLI utility: https://github.com/OpenVPN/easy-rsa - galera-ca.pem - Root CA certificate for testing - galera-int.pem - Intermediate certificate for testing - galera-server-n.key Server private key for node n - galera-server-n.pem - Server certificate for node n - bundle-galera-server-n.pem File containing both server and intermediate certificate for node n See script gen-cert-chain.sh in this same directory for chain generation.galera-4-26.4.25/tests/conf/my.cnf.2.tmpl000644 000164 177776 00000000136 15107057155 021033 0ustar00jenkinsnogroup000000 000000 wsrep_sst_method=rsync wsrep_sst_auth=root:rootpass wsrep_sst_receive_address=127.0.1.1:10023 galera-4-26.4.25/tests/conf/cluster.conf.tmpl000644 000164 177776 00000003164 15107057155 022112 0ustar00jenkinsnogroup000000 000000 # # Cluster resources configuration. 
# # DBMS that will be used for tests export DBMS=${DBMS:-"MYSQL"} # DBMS superuser username and password for administrative purposes export DBMS_ROOT_USER=${DBMS_ROOT_USER:-"root"} export DBMS_ROOT_PSWD=${DBMS_ROOT_PSWD:-"rootpass"} # DBMS test use username and password export DBMS_TEST_USER=${DBMS_TEST_USER:-"test"} export DBMS_TEST_PSWD=${DBMS_TEST_PSWD:-"testpass"} # DBMS schema to use for tests export DBMS_TEST_SCHEMA=${DBMS_TEST_SCHEMA:-"test"} # Host for clients to connect to export DBMS_HOST=${DBMS_HOST:-"127.0.1.1"} # Port for MySQL specific tests export MYSQL_PORT=${MYSQL_PORT:-"3306"} # Port for PostgreSQL specific tests export PGSQL_PORT=${PGSQL_PORT:-"5432"} # Port for crossplatform tests case "$DBMS" in "MYSQL") export DBMS_PORT=${DBMS_PORT:-"$MYSQL_PORT"} ;; "PGSQL") export DBMS_PORT=${DBMS_PORT:-"$PGSQL_PORT"} ;; esac # How many concurrent clients to use export DBMS_CLIENTS=${DBMS_CLIENTS:-"16"} # Type of GCS backend export GCS_TYPE=${GCS_TYPE:-"gcomm"} case "$GCS_TYPE" in "gcomm") ;; "vsbes") if [ -z "$VSBES_ADDRESS" ]; then echo "VSBES_ADDRESS is not set"; exit 1; fi ;; *) echo "Urecognized GCS_TYPE: '$GCS_TYPE'" ; exit 1 ;; esac # Define extra parameters passed to gcomm backend if needed, # e.g. using multicast: # # export GCOMM_EXTRA_PARAMS=${GCOMM_EXTRA_PARAMS:-"gmcast.mcast_addr=239.192.0.11"} export GCOMM_EXTRA_PARAMS="" # default replication port export GCS_PORT=4567 # common part of my.cnf export COMMON_MY_CNF=$BASE_CONF/common_my.cnf # libglb.so location if not standard (/usr/lib|/usr/local/lib) #GLB_LIB= . 
$BASE_CONF/nodes.conf # end galera-4-26.4.25/tests/conf/galera-int.pem000644 000164 177776 00000012234 15107057155 021333 0ustar00jenkinsnogroup000000 000000 Certificate: Data: Version: 3 (0x2) Serial Number: 92:ce:97:05:d3:da:44:b3:ce:1e:d7:da:f9:fe:e0:90 Signature Algorithm: sha256WithRSAEncryption Issuer: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=Galera Root CA/emailAddress=devel@galeracluster.com Validity Not Before: Jan 21 10:30:06 2021 GMT Not After : Jan 6 10:30:06 2024 GMT Subject: C=FI, ST=Uusimaa, L=Helsinki, O=Codership Oy, OU=Galera Devel, CN=Galera Int/emailAddress=devel@galeracluster.com Subject Public Key Info: Public Key Algorithm: rsaEncryption RSA Public-Key: (2048 bit) Modulus: 00:ce:bb:bc:30:44:e0:02:e7:61:b7:c3:2a:d0:29: 85:d7:7f:ac:54:a6:b6:26:8e:99:97:25:e7:c4:40: 3e:96:9a:41:0f:e3:c4:bc:0d:bd:58:2c:68:28:7f: d9:ba:16:05:78:46:34:2c:c1:d7:be:6c:c5:c2:b6: 5e:42:84:9f:d3:07:10:d0:11:28:3c:df:74:5a:08: 42:33:ea:43:26:71:5f:a0:ae:a5:83:9d:9c:0f:c1: f7:e9:7f:fb:ab:96:ec:8b:be:37:a5:0f:f8:85:1e: 5b:f1:25:5b:36:c8:87:99:3f:9a:9c:ce:af:5e:ab: fc:c0:a6:c4:3f:5d:bc:1e:e2:d4:2e:7e:6e:f5:c1: ea:56:ac:37:6a:2d:b6:57:70:e9:c4:2d:af:31:81: 99:84:65:dd:ff:e1:52:ad:e3:f6:0c:2b:3e:6d:d2: 55:fb:a8:10:f0:a8:51:eb:68:6e:05:19:6c:a7:62: d2:31:ee:61:18:b8:cb:ad:13:f9:59:fb:5f:1d:5f: 70:fc:fd:ab:0c:96:24:7b:70:f4:44:6b:ff:a2:96: 87:13:a4:f7:ea:f1:89:49:8d:21:a8:77:df:ba:7e: 9c:e7:b6:f3:69:42:9a:72:3e:84:c2:e8:09:27:b2: 8e:53:d5:cc:8c:50:63:92:a3:4c:55:b8:ce:ee:b0: 4d:69 Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:TRUE X509v3 Subject Key Identifier: C7:1F:75:96:5D:5A:4A:CD:71:AB:39:78:3E:1D:80:2D:B4:13:C2:FF X509v3 Authority Key Identifier: keyid:C4:1B:D8:FB:86:AB:CB:95:38:3B:6B:3A:5E:6F:1A:F8:4F:FF:4A:F0 DirName:/C=FI/ST=Uusimaa/L=Helsinki/O=Codership Oy/OU=Galera Devel/CN=Galera Root CA/emailAddress=devel@galeracluster.com serial:79:21:62:A8:E1:FC:30:07:5F:C4:0F:96:6F:94:05:37:AB:52:BC:33 X509v3 Key Usage: 
Certificate Sign, CRL Sign Signature Algorithm: sha256WithRSAEncryption 68:9b:ae:80:48:2e:cf:e4:f8:57:cb:a6:8f:4c:c0:bf:fc:9f: 58:ec:3e:eb:8e:ec:70:22:69:7c:5d:4f:da:ad:86:49:30:c6: 1b:72:e4:8e:b0:ee:28:c2:be:ce:9d:21:00:77:57:84:30:fc: 5c:33:a1:da:ae:9e:31:b5:13:09:55:05:aa:73:3a:b2:ce:28: b3:f1:f2:2b:97:f9:50:2b:f7:80:96:92:5c:99:b7:18:01:a0: 99:9c:de:1e:c7:43:3d:14:de:3e:9d:4c:af:6d:44:8c:28:6c: 58:45:00:6f:a1:0a:03:91:7a:7b:35:15:82:51:70:8b:8b:83: d3:41:4d:60:0f:c8:7f:b9:9b:8b:3a:26:a2:14:bd:26:f5:cf: 86:b8:b2:f5:c0:75:8d:a2:e0:3f:95:0e:97:5b:53:dc:22:54: 8c:d6:36:25:d9:37:44:a3:88:12:d8:54:68:7c:58:ef:19:c9: 54:b1:ab:2d:0a:cc:06:31:bb:74:a3:ee:c6:e7:31:be:92:81: 00:c8:6a:9c:b7:4d:54:16:20:df:06:fb:58:0c:c0:31:57:9d: 52:b2:56:a5:76:96:cd:62:2c:8c:bb:5a:76:de:8c:de:40:b0: 60:ab:52:87:7b:c1:e4:3f:eb:a9:74:8f:d2:97:66:e7:48:a1: 6f:7c:91:1c -----BEGIN CERTIFICATE----- MIIE7zCCA9egAwIBAgIRAJLOlwXT2kSzzh7X2vn+4JAwDQYJKoZIhvcNAQELBQAw gaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTAeFw0yMTAxMjExMDMwMDZaFw0yNDAxMDYxMDMw MDZaMIGdMQswCQYDVQQGEwJGSTEQMA4GA1UECAwHVXVzaW1hYTERMA8GA1UEBwwI SGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBPeTEVMBMGA1UECwwMR2FsZXJh IERldmVsMRMwEQYDVQQDDApHYWxlcmEgSW50MSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBAM67vDBE4ALnYbfDKtAphdd/rFSmtiaOmZcl58RAPpaaQQ/jxLwNvVgsaCh/ 2boWBXhGNCzB175sxcK2XkKEn9MHENARKDzfdFoIQjPqQyZxX6CupYOdnA/B9+l/ +6uW7Iu+N6UP+IUeW/ElWzbIh5k/mpzOr16r/MCmxD9dvB7i1C5+bvXB6lasN2ot tldw6cQtrzGBmYRl3f/hUq3j9gwrPm3SVfuoEPCoUetobgUZbKdi0jHuYRi4y60T +Vn7Xx1fcPz9qwyWJHtw9ERr/6KWhxOk9+rxiUmNIah337p+nOe282lCmnI+hMLo CSeyjlPVzIxQY5KjTFW4zu6wTWkCAwEAAaOCASIwggEeMAwGA1UdEwQFMAMBAf8w HQYDVR0OBBYEFMcfdZZdWkrNcas5eD4dgC20E8L/MIHhBgNVHSMEgdkwgdaAFMQb 2PuGq8uVODtrOl5vGvhP/0rwoYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UE 
CAwHVXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hp cCBPeTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9v dCBDQTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CFHkh Yqjh/DAHX8QPlm+UBTerUrwzMAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOC AQEAaJuugEguz+T4V8umj0zAv/yfWOw+647scCJpfF1P2q2GSTDGG3LkjrDuKMK+ zp0hAHdXhDD8XDOh2q6eMbUTCVUFqnM6ss4os/HyK5f5UCv3gJaSXJm3GAGgmZze HsdDPRTePp1Mr21EjChsWEUAb6EKA5F6ezUVglFwi4uD00FNYA/If7mbizomohS9 JvXPhriy9cB1jaLgP5UOl1tT3CJUjNY2Jdk3RKOIEthUaHxY7xnJVLGrLQrMBjG7 dKPuxucxvpKBAMhqnLdNVBYg3wb7WAzAMVedUrJWpXaWzWIsjLtadt6M3kCwYKtS h3vB5D/rqXSP0pdm50ihb3yRHA== -----END CERTIFICATE----- galera-4-26.4.25/tests/conf/sqlgen.conf000644 000164 177776 00000000170 15107057155 020741 0ustar00jenkinsnogroup000000 000000 # Script test_sqlgen/run.sh --create 1 --rows 500 --duration 10 test_sqlgen/run.sh --create 1 --rows 1000 --duration 10 galera-4-26.4.25/tests/conf/galera_key.pem000644 000164 177776 00000003250 15107057155 021411 0ustar00jenkinsnogroup000000 000000 -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDHrDam1TsHQPm+ E7tZcI8dGpa0wTooKh3WWPRIjcpOuDXlV8cxyK3YZAmquAT4iWcJQZZq8t+/LrpG K5i9sQl8dAjLftJKIRquEdwO027xF0hsqPiDMvfbBoy8af78tf3d3AFvq+SwL5jL nTOYkAqEYXbK6m64/mAN0npyrqX9ULsPQkQggnbZYYJfRa3V3I2QfLMlFD7Y8oy5 fZTKOUOf8F1neCGNlMtGXWl3I3CW13uVyS2JmZSmI+iPtxmFOunLK+ymfmmH+6/v 2EGOPZ2JNbQ1jLFPlLMfgKamSPzkI0U0jAsomFtiJqUceJFdl+WAQXKc7OUWyd5r GYvRJmGdAgMBAAECggEAJxrIHUZXaCVGg/5TS8HqlVWWvFG3dRRCga8sMGVsgJw9 Gexl+48E1QNKjV+6wX4PatWogsuMHGInYwS4xjW82N3Utx3gS1pOYlbnFRQ9fwQD UByLhw5dUkznFSNhJ9bwoBmtQlSU1hiMQRxJdZCILsLzw+vDwYE4CJz3FCy2H3Tt 9jVrt0hkWBpm6mE188U/0439vxbOz8aIQmH0maaGYMXNi+2cGmTHwIZr89c+MLJN yH7wG1cveCCMbQtesgkl8F9+GqTJlByt3DWCupQ6jrcvEGRaybD8ELrVxPZkFckQ UXOBrZzS/Jx4bmK/4Vo/ljl5qaMLnrY/2WkJgpzPFQKBgQD95aM2NBU5iI+Ls4jo moKUK2eErZWXwh2sCxlByXtvuEL0fDrtjsbwtn9usmtjHvRHz9rYeXhp4kkteUOe DptTvQrBlbPuLZMQ5j/46fw+cCF6NgM8IhwHwhy2xifk7eHbjxBPQtDOeNZPekIV 
kOwLYrFP1/xZrPOFPvTanlt62wKBgQDJU5lPxPjx6XfFEiKGiytPUuVn98r3TFaf SCCR4PxgS4+GcYx3Oz2+sSwtGwThxN38pd0/N5iGRML2+Nsp+mLfZ3oEB5QL1xAL Q4duW1DOeXsHpPTM9OkbWCWOvsHffikyYyDXBWIOVCwnEFkahgCAc4G8yTGfGrx3 xosdHD9y5wKBgQD1TOZROiS/f1bgrEa8JHvAV+cd4u/CvYMZc2eljdo6aBYYS+ZL GkTO0CNyeeMS0xdEQme3+jQCaOQ2kRBpJsefMeewfMhod5O9Ihfwj3Brir5tar3Z XUMjo8FGPVDR95rdG+2wBmfi9BBqnT43w9qqbWHOOGjQ4y4sMFU02wabPQKBgGCk xN/KCkb2cAwmHHTBsdSuUnmKNeBowNxNX3Unr376RrefInLJ+WXk3vP4GGvYeUei x4ZlRc6Oi5jK9Uo9a+EaZzQv/x1/66+8hvKu2yeCoVCWGv2YQ55gvga8A40pntUV SNpvNxbxyRAnhN56nRsMV6csXammx/onUPh0avDZAoGBAJ+fM93yHuZ5zhJ17qN2 OsquEIq/4V8ChV77vLaWxlWIi48LYbMCDQMUxz+fcXw6v9nkFIDt0jEnn0cf54hf /oZNgQBKRgwvPbXVzBYt/9hdxZY0nCc54GnZe+dVx7cbC6PPQ/HB55lgFqfBbW5A 7N+ImmmIeBWkr5aXGKA+fvnj -----END PRIVATE KEY----- galera-4-26.4.25/tests/conf/bundle-galera-server-3.pem000644 000164 177776 00000006766 15107057155 023473 0ustar00jenkinsnogroup000000 000000 -----BEGIN CERTIFICATE----- MIIFBTCCA+2gAwIBAgIQL1zVZra4T1YP2fglm7GuXjANBgkqhkiG9w0BAQsFADCB nTELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDETMBEGA1UEAwwKR2FsZXJhIEludDEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2Fs ZXJhY2x1c3Rlci5jb20wHhcNMjEwMTIxMTAzMDA2WhcNMjQwMTA2MTAzMDA2WjCB ojELMAkGA1UEBhMCRkkxEDAOBgNVBAgMB1V1c2ltYWExETAPBgNVBAcMCEhlbHNp bmtpMRUwEwYDVQQKDAxDb2RlcnNoaXAgT3kxFTATBgNVBAsMDEdhbGVyYSBEZXZl bDEYMBYGA1UEAwwPZ2FsZXJhLXNlcnZlci0zMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBALSDwuyokegxqwcFl5XJ1oKhjkeWQ4P8+Mnkc+e4nWEiIERltY9ZyD2xpYT3 fHkrI7TcqqCyoi3ZpEb9TChhg7VnMXCBPxWgOBnoUEeCN08G0PLbV++xuUQv3XCo tkQxaVNwzXYkqzo1vmgjjDBSKtVF8hZcYr26vPDYUraXHziZwrHSHL/PdncQDPNo U+iiqJyJa9JiftbVCnPWv/M12SCc7F5NwQhoL5+s5iJJ+DGZeB9z4ZITp3kFwbiR vvsloD2sT6gzv1NuXQ2D4gPZA9Ly37PesS61oQ4lUhWt4Zt2Rnz5wIcABIyoMvE5 X27zoQU2bauCIQ/NGDfsrfJIvMUCAwEAAaOCATgwggE0MAkGA1UdEwQCMAAwHQYD VR0OBBYEFECqB4235rqo8UaJ6U9fnY2xQJcdMIHeBgNVHSMEgdYwgdOAFMcfdZZd 
WkrNcas5eD4dgC20E8L/oYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UECAwH VXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBP eTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9vdCBD QTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CEQCSzpcF 09pEs84e19r5/uCQMAsGA1UdDwQEAwIFoDAaBgNVHREEEzARgg9nYWxlcmEtc2Vy dmVyLTMwDQYJKoZIhvcNAQELBQADggEBAB+34pTwIw8mObbi9GjKRw0eXEm7GRTY iz+I2QXGaZq71GHvGd3iyGgmdwz8ffwAdd4BuZjNECMYueBrDnNdYz8ilRjfEQQN OkNOfjt17p/NHE0FksSLPAIi9ewaUDL/oVvPjQdKpJehv2pcsSTvK9YNfHAdWDWL 2uM3vsyUyyoFqjLepgCcxPbL7Ar+zC/c53Z/YTAjrv7/9lc0nD2P/7MKb3v5/OJb qheijRBfRu4jmKkGuSwRUv4U7QJMha8G0Pe7+KrTmw9cXBDffKjuase5Z+Oe/swk Zgg7b4BTHZnllKDZvCerGibHBA7SPlxdrH0GGt7VKL21liNZ4psqywo= -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIE7zCCA9egAwIBAgIRAJLOlwXT2kSzzh7X2vn+4JAwDQYJKoZIhvcNAQELBQAw gaExCzAJBgNVBAYTAkZJMRAwDgYDVQQIDAdVdXNpbWFhMREwDwYDVQQHDAhIZWxz aW5raTEVMBMGA1UECgwMQ29kZXJzaGlwIE95MRUwEwYDVQQLDAxHYWxlcmEgRGV2 ZWwxFzAVBgNVBAMMDkdhbGVyYSBSb290IENBMSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTAeFw0yMTAxMjExMDMwMDZaFw0yNDAxMDYxMDMw MDZaMIGdMQswCQYDVQQGEwJGSTEQMA4GA1UECAwHVXVzaW1hYTERMA8GA1UEBwwI SGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hpcCBPeTEVMBMGA1UECwwMR2FsZXJh IERldmVsMRMwEQYDVQQDDApHYWxlcmEgSW50MSYwJAYJKoZIhvcNAQkBFhdkZXZl bEBnYWxlcmFjbHVzdGVyLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBAM67vDBE4ALnYbfDKtAphdd/rFSmtiaOmZcl58RAPpaaQQ/jxLwNvVgsaCh/ 2boWBXhGNCzB175sxcK2XkKEn9MHENARKDzfdFoIQjPqQyZxX6CupYOdnA/B9+l/ +6uW7Iu+N6UP+IUeW/ElWzbIh5k/mpzOr16r/MCmxD9dvB7i1C5+bvXB6lasN2ot tldw6cQtrzGBmYRl3f/hUq3j9gwrPm3SVfuoEPCoUetobgUZbKdi0jHuYRi4y60T +Vn7Xx1fcPz9qwyWJHtw9ERr/6KWhxOk9+rxiUmNIah337p+nOe282lCmnI+hMLo CSeyjlPVzIxQY5KjTFW4zu6wTWkCAwEAAaOCASIwggEeMAwGA1UdEwQFMAMBAf8w HQYDVR0OBBYEFMcfdZZdWkrNcas5eD4dgC20E8L/MIHhBgNVHSMEgdkwgdaAFMQb 2PuGq8uVODtrOl5vGvhP/0rwoYGnpIGkMIGhMQswCQYDVQQGEwJGSTEQMA4GA1UE CAwHVXVzaW1hYTERMA8GA1UEBwwISGVsc2lua2kxFTATBgNVBAoMDENvZGVyc2hp cCBPeTEVMBMGA1UECwwMR2FsZXJhIERldmVsMRcwFQYDVQQDDA5HYWxlcmEgUm9v 
dCBDQTEmMCQGCSqGSIb3DQEJARYXZGV2ZWxAZ2FsZXJhY2x1c3Rlci5jb22CFHkh Yqjh/DAHX8QPlm+UBTerUrwzMAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOC AQEAaJuugEguz+T4V8umj0zAv/yfWOw+647scCJpfF1P2q2GSTDGG3LkjrDuKMK+ zp0hAHdXhDD8XDOh2q6eMbUTCVUFqnM6ss4os/HyK5f5UCv3gJaSXJm3GAGgmZze HsdDPRTePp1Mr21EjChsWEUAb6EKA5F6ezUVglFwi4uD00FNYA/If7mbizomohS9 JvXPhriy9cB1jaLgP5UOl1tT3CJUjNY2Jdk3RKOIEthUaHxY7xnJVLGrLQrMBjG7 dKPuxucxvpKBAMhqnLdNVBYg3wb7WAzAMVedUrJWpXaWzWIsjLtadt6M3kCwYKtS h3vB5D/rqXSP0pdm50ihb3yRHA== -----END CERTIFICATE----- galera-4-26.4.25/tests/scripts/000755 000164 177776 00000000000 15107057160 017341 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/tests/scripts/jobs.sh000644 000164 177776 00000005314 15107057155 020641 0ustar00jenkinsnogroup000000 000000 # # Routines pertaining to parallel execution # # xxxx_job() functions assume node index as last parameter # # Logging must happen on different layers. # e.g. logging of stderr must happen on the same level as recording of return # code. While stdout log should record only the output of command. #_local_job() #{ # local cmd="$1" # eval "$($@)" # eval $cmd #} #_ssh_job() #{ # local node=${@:$#} # last argument # local cmd="$($@)" # local cmd="$1" # # ssh -ax ${NODE_LOCATION[$node]} "$cmd" #} _date() { echo -n $(date +'%y%m%d %T.%N' | cut -c 1-19) } virtual_job() { local node=${@:$#} # last argument local out="$BASE_OUT/${1}_${NODE_ID[$node]}.out" local cmd="$($@)" if [ "${NODE_LOCATION[$node]}" = "local" ] then # local_job "$cmd" 1>"$out" eval "$cmd" 1>"$out" 2>&1 else # ssh_job "$cmd" 1>"$out" ssh -ax -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${NODE_LOCATION[$node]} "$cmd" 1>"$out" 2>&1 fi } # This function tries to add a bit of polymorphism by treating untar cmd # specially. The speciality comes from that it not only excutes command, # but also transfers data # # Usage: node_job cmd opt1 opt2 ... 
optN node # node_job() { local cmd=$1 shift local node=${@:$#} # last argument local node_id="${NODE_ID[$node]}" local prefix="$BASE_RUN/${cmd}_${node_id}" local rcode=0 local start=$SECONDS case $cmd in "untar_cmd") local dist="$1" shift cat "$dist" | virtual_job "$cmd" "$@" 2>"$prefix.err" && \ copy_config $node 2>"$prefix.err" || rcode=$? ;; *) virtual_job "$cmd" "$@" 2>"$prefix.err" || rcode=$? ;; esac echo $rcode > "$prefix.ret" echo -n "$(_date) Job '$cmd' on '$node_id'" if [ $rcode -eq 0 ] then echo " complete in $(($SECONDS - $start)) seconds, " else echo " failed with code: $rcode, " echo "FAILED COMMAND: $($cmd $@)" echo "REASON: $(cat "$prefix.err")" fi return $rcode } start_jobs() { SECONDS=0 local node for node in $NODE_LIST do local node_id="${NODE_ID[$node]}" local prefix="$BASE_RUN/${1}_$node_id" node_job "$@" $node & echo $! > "$prefix.pid" echo "$(_date) Job '$1' on '$node_id' started" done echo "All jobs started" } wait_jobs() { local err=0 local node for node in $NODE_LIST do wait %% 2>$BASE_RUN/wait.err || err=$?; # 127 - no more jobs if [ $err -eq 127 ]; then err=0; break; fi if [ $err -gt 128 ]; then err=0; fi # ignore signals done echo "$(_date) All jobs complete in $SECONDS seconds" return $err } galera-4-26.4.25/tests/scripts/misc.sh000644 000164 177776 00000001277 15107057155 020643 0ustar00jenkinsnogroup000000 000000 # # Miscellaneous functions # # Sleeps variable amount of seconds (by default 1-10) pause() #min_sleep #var_sleep { local min_sleep=${1:-"1"} local var_sleep=${2:-"10"} local p=$(( $RANDOM % var_sleep + min_sleep )) echo "Sleeping for $p sec." sleep $p } # Pauses given processes (load) to perform consistency check consistency_check() #pids { local ret=0 local pids="$@" [ -n "$pids" ] && kill -STOP $pids sleep 1 check || (sleep 2; check) || (sleep 3; check) || ret=$? 
[ -n "$pids" ] && kill -CONT $pids # processes will receive SIGHUP in case of script exit return $ret } find_mysqld_pid() { ps ax | grep mysqld | grep -w ^\ *$1 > /dev/null } galera-4-26.4.25/tests/scripts/signal.sh000644 000164 177776 00000000560 15107057155 021157 0ustar00jenkinsnogroup000000 000000 # # Sends signal to process # signal_cmd() { local sig=$1 local node=$2 local dir="${NODE_TEST_DIR[$node]}" echo $sig $node case $DBMS in "MYSQL") echo -n "kill -$sig \$(cat $dir/mysql/var/mysqld.pid)" ;; "PGSQL"|*) echo "Not supported" >&2 return 1 ;; esac } signal_node() { node_job signal_cmd "$@" } galera-4-26.4.25/tests/scripts/kill.sh000644 000164 177776 00000001143 15107057155 020633 0ustar00jenkinsnogroup000000 000000 # # Ungracefully kills the process # kill_cmd() { local node=${@:$#} local dir="${NODE_TEST_DIR[$node]}" case $DBMS in "MYSQL") echo -n "kill -9 \$(cat $dir/mysql/var/mysqld.pid)" ;; "PGSQL"|*) echo "Not supported" >&2 return 1 ;; esac } kill_node() { local node=${@:$#} local dir="${NODE_TEST_DIR[$node]}" local pid=$(cat $dir/mysql/var/mysqld.pid) node_job kill_cmd "$@" # wait process to disappear. 
while find_mysqld_pid $pid do sleep 0.1 done } kill_all() { start_jobs kill_cmd wait_jobs } galera-4-26.4.25/tests/scripts/install.sh000644 000164 177776 00000005613 15107057155 021354 0ustar00jenkinsnogroup000000 000000 # # Routines to install distribution on nodes # untar_cmd() { local node=${@:$#} local path="${NODE_TEST_DIR[$node]}" local base=$path/mysql local data=$base/var local hst=$(hostname -s) local SAVE="([ -d \"$data\" ] && rm -rf \"$data\".saved && mv \"$data\" \"$data\".saved || :) && " local UNTAR="mkdir -p \"$path\" && tar --strip 1 -C \"$path\" -xzf - && " local INIT="\"$base\"/bin/init_db.sh \"$base\" \"$DBMS_ROOT_PSWD\" \"$DBMS_TEST_PSWD\" || : && " local RESTORE="([ -d \"$data\".saved ] && rm -rf \"$data\" && mv \"$data\".saved \"$data\" || :) " echo -n "$SAVE $UNTAR $INIT $RESTORE" } copy_config() { local -r node=$1 local cnf local cnf_dir local ca_src="$BASE_CONF/galera_ca.pem" local ca_dst local key_src="$BASE_CONF/galera_key.pem" local key_dst local cert_src="$BASE_CONF/galera_cert.pem" local cert_dst case $DBMS in MYSQL) common_cnf="$COMMON_MY_CNF" cnf_src="${NODE_MY_CNF[$node]}" cnf_dst="${NODE_TEST_DIR[$node]}/mysql/etc/my.cnf" ca_dst="${NODE_TEST_DIR[$node]}/mysql/var/galera_ca.pem" key_dst="${NODE_TEST_DIR[$node]}/mysql/var/galera_key.pem" cert_dst="${NODE_TEST_DIR[$node]}/mysql/var/galera_cert.pem" ;; PGSQL|*) echo "Unsupported DBMS: '$DBMS'" >&2 return 1 ;; esac if [ -n "$common_cnf" ] || [ -n "$cnf_src" ] then if [ "${NODE_LOCATION[$node]}" = "local" ] then ([ -n "$common_cnf" ] && cat "$common_cnf" && \ [ -n "$cnf_src" ] && cat "$cnf_src") > "$cnf_dst" cat "$ca_src" > "$ca_dst" cat "$key_src" > "$key_dst" cat "$cert_src" > "$cert_dst" else local remote="${NODE_LOCATION[$node]}" ([ -n "$common_cnf" ] && cat "$common_cnf" && \ [ -n "$cnf_src" ] && cat "$cnf_src") | \ ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$remote" "cat > $cnf_dst" cat "$ca_src" | ssh -o UserKnownHostsFile=/dev/null -o 
StrictHostKeyChecking=no "$remote" "cat > $ca_dst" cat "$key_src" | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$remote" "cat > $key_dst" cat "$cert_src" | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$remote" "cat > $cert_dst" fi fi } copy_file_node() { set -x local -r src_file="$1" local -r dst_file="$2" local -r node="$3" cat "$src_file" | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "${NODE_LOCATION[$node]}" "cat - > ${NODE_TEST_DIR[$node]}/$dst_file" } copy_file() { local -r src_file="$1" local -r dst_file="$2" start_jobs copy_file_node "$src_file" "$dst_file" } install_node() { local dist=$1 node_job untar_cmd "$@" } install() { local dist=$1 start_jobs untar_cmd $dist wait_jobs } galera-4-26.4.25/tests/scripts/remove.sh000644 000164 177776 00000000341 15107057155 021174 0ustar00jenkinsnogroup000000 000000 # # Routines to remove test distribution from nodes # remove_cmd() { local node=${@:$#} local dirn="${NODE_TEST_DIR[$node]}" echo -n 'rm -rf '"$dirn"'/*' } remove() { start_jobs remove_cmd wait_jobs } galera-4-26.4.25/tests/scripts/action.sh000644 000164 177776 00000020422 15107057155 021156 0ustar00jenkinsnogroup000000 000000 # Helper to get status variable value mysql_command() { local node=$1 if [ "${NODE_LOCATION[$node]}" = "local" ] then echo "${NODE_TEST_DIR[$node]}/mysql/bin/mysql" else echo "mysql" fi } cluster_status() { local node=$1 case "$DBMS" in "MYSQL") local command=$(mysql_command $node) local res=$($command -u$DBMS_ROOT_USER -p$DBMS_ROOT_PSWD \ -h${NODE_INCOMING_HOST[$node]} -P${NODE_INCOMING_PORT[$node]} \ --skip-column-names -ss \ -e "SET wsrep_on=0; SHOW STATUS WHERE Variable_name LIKE 'wsrep_cluster_status' OR Variable_name LIKE 'wsrep_cluster_size'" 2>/dev/null) echo -n $res | awk '{ print $4 ":" $2; }' ;; "PGSQL"|*) return -1 esac } mysql_query() { local node=$1 local query=$2 local command=$(mysql_command $node) $command -u$DBMS_ROOT_USER -p$DBMS_ROOT_PSWD \ 
-h${NODE_INCOMING_HOST[$node]} -P${NODE_INCOMING_PORT[$node]} \ --skip-column-names -ss -e "$query" } wait_node_state() { local node=$1 local state=$2 while true do local res="-1" case "$DBMS" in "MYSQL") res=$(mysql_query $node "SHOW STATUS LIKE 'wsrep_local_state'" \ | awk '{ print $2 }') ;; "PGSQL"|*) return -1 esac if [ "$res" = "$state" ]; then break; fi sleep 1 done } # # Routines to start|stop|check cluster nodes # action_cmd() { local cmd=$1 local node=${@:$#} local nargs=$(( $# - 2 )) # minus cmd and node local args="${@:2:$nargs}" # arguments range from 2 to n-1 local dir="${NODE_TEST_DIR[$node]}" case "$DBMS" in "MYSQL") echo -n "MYSQL_PORT=${NODE_INCOMING_PORT[$node]} "\ "\"$dir/mysql-galera\" $args $cmd" ;; "PGSQL"|*) return -1 ;; esac } # By convention node index is the last in the arguments list. # So we prepend command to the argument list otherwise it'll go after node # index here. start_cmd() { action_cmd "start" "$@" } stop_cmd() { action_cmd "stop" "$@" } restart_cmd() { action_cmd "restart" "$@" } check_cmd() { action_cmd "check" "$@" } dump_cmd() { action_cmd "dump" "$@" } action() { start_jobs "$@" wait_jobs } dump() { action "dump_cmd" "$@" } check() { wait_sync $NODE_LIST || true cmd="check_cmd" ! action "$cmd" "$@" # ! - to ignore possible connection error local -r prefix="$BASE_OUT/$cmd" local node local prev="" local fail="" for node in $NODE_LIST do local node_id="${NODE_ID[$node]}" local out="${prefix}_${node_id}.out" chk=$(cat "$out") # no need to check if file exists: # should be created even if command fails # echo "$node_id: ${chk%% -}" if [ -n "$chk" ] # skip 0-length checksum: the node was down then echo "$chk" | sed s/-/${node_id}/ if [ -z "$prev" ] then prev="$chk" else if [ "$prev" != "$chk" ] then fail="yes" fi fi fi done if [ -z "$fail" ] && [ -n "$prev" ]; then return 0; fi echo "Checksum failed." 
# for node in $NODE_LIST # do # local node_id="${NODE_ID[$node]}" # echo -n "$node_id: " # cat "${prefix}_$node_id.out" # done return 1 } # Query each node with causal reads on to make sure that slave # queue has been fully processed. # Arguments: list of nodes wait_sync() { local nodes=${@:-$NODE_LIST} local node for node in $nodes do mysql_query "$node" "SET SESSION wsrep_sync_wait=1; select 0;" 1>/dev/null done } start_node() { node_job "start_cmd" "$@" } stop_node() { node_job "stop_cmd" "$@" } restart_node() { node_job "restart_cmd" "$@" } dump_node() { node_job "dump_cmd" "$@" } # unlike bulk check this one returns error when the node could not be checked check_node() { local cmd="check_cmd" node_job "$cmd" "$@" local node_id="${NODE_ID[$1]}" cat "${BASE_OUT}/${cmd}_${node_id}.out" | sed s/-/${node_id}/ return $(cat $BASE_RUN/check_cmd_$node_id.ret) } extra_params() { local node=$1 local extra_params [ -z "$GCOMM_EXTRA_PARAMS" ] && extra_params="?" || extra_params="?${GCOMM_EXTRA_PARAMS}&" # echo "${extra_params}gmcast.listen_addr=tcp://${NODE_GCS_HOST[$node]}:${NODE_GCS_PORT[$node]}" echo "${extra_params}gmcast.listen_addr=tcp://0.0.0.0:${NODE_GCS_PORT[$node]}" } # return GCS address at which node N should connect to group gcs_address() { local node=$1 case "$GCS_TYPE" in "gcomm") local peer=$(( $node - 1 )) # select previous node as connection peer # local peer=0 # use the first node as a connection handle if [ $peer -lt 0 ]; then peer=$NODE_MAX; fi # rollover echo "'gcomm://${NODE_GCS_HOST[$peer]}:${NODE_GCS_PORT[$peer]}$(extra_params $node)'" ;; "vsbes") echo "'vsbes://$VSBES_ADDRESS'" ;; *) return 1 ;; esac } # start/restart nodes in group mode. 
_cluster_up() { local -r cmd=$1 shift SECONDS=0 # for wait_jobs for node in $NODE_LIST do echo "Starting ${NODE_ID[$node]}" if [ $node -eq 0 ] then # must make sure 1st node completely operational case "$GCS_TYPE" in # "gcomm") $cmd "-g 'gcomm://:${NODE_GCS_PORT[$node]}$(extra_params $node)'" "$@" 0 ;; "gcomm") $cmd "-g $(gcs_address $node) --mysql-opt --wsrep-new-cluster" "$@" 0 ;; "vsbes") $cmd "-g 'vsbes://$VSBES_ADDRESS'" "$@" 0 ;; esac else $cmd "-g $(gcs_address $node)" "$@" $node & fi done wait_jobs } # start/restart nodes in group mode. bootstrap() { SECONDS=0 # for wait_jobs local cnt=0 for node in $NODE_LIST do echo "Starting ${NODE_ID[$node]}" start_node "-g $(gcs_address $node)" "$@" $node & cnt=$(($cnt + 1)) done # TODO: Poll until all have reached non-prim for node in 0 # only one node is sufficient do while true do st=$(cluster_status $node) if test "x$st" = "xnon-Primary:$cnt" then break; fi sleep 1 done done # TODO: Figure out how to do this in DBMS indepent way case "$DBMS" in "MYSQL") mysql -u$DBMS_ROOT_USER -p$DBMS_ROOT_PSWD \ -h${NODE_INCOMING_HOST[0]} \ -P${NODE_INCOMING_PORT[0]} \ -e "SET GLOBAL wsrep_provider_options='pc.bootstrap=1'" ;; "PGSQL"|*) return -1 ;; esac # Jobs will finish when nodes reach primary wait_jobs } start() { _cluster_up start_node "$@" } _get_status_var() { # INFORMATION_SCHEMA.GLOBAL_STATUS is deprecated in MySQL >= 5.7 # SHOW GLOBAL STATUS seems to be more compatible between the versions. 
# mysql_query "$1" "SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = '$2'" 2>/dev/null || echo -1 mysql_query "$1" "SHOW GLOBAL STATUS LIKE '$2'" | tail -n1 | cut -f 2- [ 0 = ${PIPESTATUS[0]} ] || echo -1 } stop() { SECONDS=0 local node # stop all nodes but the first one (nearly) simultaneously for node in $(seq $NODE_MAX -1 1) do echo "Stopping ${NODE_ID[$node]}" stop_node "$@ $node" & sleep 1 done node=0 # Here we don't care if the node lost PC (it should not but it may) # If it did there is little we can do short of bootstrapping it while [ $(_get_status_var "$node" "wsrep_cluster_size") -gt 1 ] do sleep 0.2 done if [ $(_get_status_var "$node" "wsrep_cluster_status") = "non-Primary" ] then mysql_query "$node" "SET GLOBAL wsrep_provider_options='pc.bootstrap=1'" while [ $(_get_status_var "$node" "wsrep_cluster_status") = "non-Primary" ] do sleep 0.2 done fi echo "Stopping ${NODE_ID[$node]}" stop_node "$@ $node" & wait_jobs } restart() { stop _cluster_up start_node "$@" } galera-4-26.4.25/tests/scripts/README000644 000164 177776 00000003374 15107057155 020234 0ustar00jenkinsnogroup000000 000000 THIS DIRECTORY This directory contains a library of cluster manipulation scripts. The main idea is facilitation of parallel operations and minimizaiton of code duplicaiton. Executable file is command.sh. It sources the configuration and the rest of the scripts which implement different functions: jobs.sh - the main file which defines generic command execution framework. It implements parallel and per-node command execution routines and enables unified logging behaviour. Other scripts just have to define the actual command functions following certain conventions, so far there is only one: node index is appended to the list of command line parameters, then everything is passed to the function. 
install.sh - implements a cluster-wide 'install' command which takes a name of the distribution file as the first argument remove.sh - implements a cluster-wide "remove" command. action.sh - usual start/stop/restart/check commands, both cluster-wide and per node. kill.sh - a per-node kill -9 command Assumed convention: cluster-wide commands are just commnads, per-node commands have _node suffix. E.g. ./command.sh start starts all nodes, ./command.sh stop_node 1 stops only node number 1. Numbering is 0-based. It is intended that each command should implement its own help. SPECIAL FILES AND DIRECTORIES: ../conf directory contains configuration files. Each command creates at least 4 files named _. where is: out - standard output of the command err - standard error of the command pid - pid of the porcess executing command ret - return code of the command "out" files are placed into $BASE_OUT directory, other files are placed in $BASE_RUN. The reason to separate is unclear. galera-4-26.4.25/tests/scripts/command.sh000755 000164 177776 00000001377 15107057155 021332 0ustar00jenkinsnogroup000000 000000 #!/bin/bash -eu help() { echo "Usage: $0 [args]" echo "Cluster commands: install, remove, start, stop, check" echo "Node commands: start_node, stop_node, restart_node, check_node," echo " kill_node" echo "Command help: $0 help" } if [ $# -eq 0 ]; then help >&2; exit 1; fi declare -r DIST_BASE=$(cd $(dirname $0)/..; pwd -P) declare -r DIST_SCRIPTS="$DIST_BASE/scripts" # later create config.sh to read config from command line options declare -r TEST_BASE=${TEST_BASE:-"$DIST_BASE"} . "$TEST_BASE/conf/main.conf" . $DIST_SCRIPTS/jobs.sh . $DIST_SCRIPTS/install.sh . $DIST_SCRIPTS/remove.sh . $DIST_SCRIPTS/action.sh . $DIST_SCRIPTS/kill.sh . 
$DIST_SCRIPTS/signal.sh command=$1 shift $command "$@" galera-4-26.4.25/tests/run.sh000755 000164 177776 00000002632 15107057155 017024 0ustar00jenkinsnogroup000000 000000 #!/bin/bash set -e # This script assumes that galera cluster is alredy installed and configured # This is location of this script. _HOME suffix preferred to _ROOT to avoid # confusion THIS_HOME=$(cd $(dirname $0); pwd -P) # Optional configuration file if test -n "$GALERA_TEST_CONFIG" then . "$GALERA_TEST_CONFIG" fi GALERA_TESTS_HOME=${GALERA_TESTS_HOME:-$THIS_HOME} GALERA_RESULTS_HOME=${GALERA_RESULTS_HOME:-$GALERA_TESTS_HOME/results} # Incoming cluster address (load balancer) export GALERA_CLUSTER_IP=${GALERA_CLUSTER_IP:-"127.0.0.1"} export GALERA_CLUSTER_PORT=${GALERA_CLUSTER_PORT:-3306} # List of tests to run GALERA_TESTS=${GALERA_TESTS:-"sqlgen dbt2 dots"} # This is needed for native load balancing and consistency checking export GALERA_NODES_IPS=${GALERA_NODE_IPS:?"GALERA_NODE_IPS not set"} export GALERA_NODES_PORTS=${GALERA_NODE_PORTS:?"GALERA_NODE_PORTS not set"} # Create a results directory for this run GALERA_DATE=$(date +%Y-%m-%d_%H:%M:%S) mkdir -p $GALERA_RESULTS_HOME/$GALERA_DATE declare TESTS_FAILED TESTS_FAILED=0 for TEST in $GALERA_TESTS do export GALERA_RESULT_DIR=$GALERA_RESULTS_HOME/$GALERA_DATE/$TEST mkdir -p $GALERA_RESULT_DIR echo -n "Running $TEST... " $GALERA_TESTS_HOME/test_$TEST/run.sh && echo "passed" \ || { TESTS_FAILED=$[ $TESTS_FAILED + 1 ]; echo "failed"; } done if [ $TESTS_FAILED != "0" ] then echo "Tests failed: $TESTS_FAILED" exit 1 fi # galera-4-26.4.25/tests/run_test_set.sh000755 000164 177776 00000000642 15107057155 020735 0ustar00jenkinsnogroup000000 000000 #!/bin/sh # # This is a wrapper script for ./tap/run_test_set.pl # BASE_DIR=$(cd $(dirname $0); pwd -P) cd $BASE_DIR res=$? if test $res != 0 then echo "Failed to change directory to $BASE_DIR" exit 1 fi export TEST_BASE_DIR=$BASE_DIR . $BASE_DIR/conf/cluster.conf $BASE_DIR/tap/run_test_set.pl $@ res=$? 
if test $res != 0 then echo "Failed to run test set, exit code: $res" exit 1 fi exit 0 galera-4-26.4.25/GALERA_VERSION000644 000164 177776 00000000141 15107057155 016573 0ustar00jenkinsnogroup000000 000000 GALERA_VERSION_WSREP_API=26 GALERA_VERSION_MAJOR=4 GALERA_VERSION_MINOR=25 GALERA_VERSION_EXTRA= galera-4-26.4.25/CMakeLists.txt000644 000164 177776 00000010330 15107057155 017251 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2025 Codership Oy # message(STATUS "CMake version ${CMAKE_VERSION}") cmake_minimum_required(VERSION 2.8...4.0) project(galera-4) include(CheckCCompilerFlag) include(CheckCXXCompilerFlag) include(CheckIncludeFile) include(CheckIncludeFileCXX) include(CheckCXXCompilerFlag) include(CheckLibraryExists) include_directories( ${PROJECT_SOURCE_DIR} ${PROJECT_SOURCE_DIR}/common ${PROJECT_SOURCE_DIR}/galera/src ${PROJECT_SOURCE_DIR}/galerautils/src ${PROJECT_SOURCE_DIR}/gcache/src ${PROJECT_SOURCE_DIR}/gcomm/src ${PROJECT_SOURCE_DIR}/gcs/src ${PROJECT_SOURCE_DIR}/wsrep/src ) # Options to control compiler options and error behavior. option(GALERA_MAINTAINER_MODE "Fail compilation on warnings" OFF) # Galera library options. option(GALERA_WITH_SSL "Compile Galera with SSL" ON) option(GALERA_VERSION_SCRIPT "Limit symbols visible from Galera DSO" ON) option(GALERA_STATIC "Build statically linked binaries" OFF) option(GALERA_SOURCE "Configure for source package only, skipping build configuration" OFF) # Developer options and instrumentation. 
option(GALERA_WITH_ASAN "Enable ASAN instrumentation" OFF) option(GALERA_WITH_VALGRIND "Optionally run tests with valgrind" OFF) option(GALERA_WITH_COVERAGE "Compile with coverage instrumentation" OFF) option(GALERA_GCS_SM_DEBUG "Enable dumping of send monitor state and history" OFF) option(GALERA_GU_DEBUG_MUTEX "Enable mutex debug instrumentation" OFF) option(GALERA_GU_DBUG_ON "Enable sync point macros (ON for Debug builds)" OFF) option(GALERA_GLIBCXX_DEBUG "Enable glibc++ debug instrumentation" OFF) # # Set cmake policies before doing any checks. # # In CMake 3.12 and above, the # # * ``check_include_file`` macro in the ``CheckIncludeFile`` module, the # * ``check_include_file_cxx`` macro in the # ``CheckIncludeFileCXX`` module, and the # * ``check_include_files`` macro in the ``CheckIncludeFiles`` module # # now prefer to link the check executable to the libraries listed in the # ``CMAKE_REQUIRED_LIBRARIES`` variable. This policy provides compatibility # with projects that have not been updated to expect this behavior. if (POLICY CMP0075) CMAKE_POLICY(SET CMP0075 OLD) endif() # Determine version first to have it near the top of build log. include(cmake/version.cmake) include(cmake/package.cmake) # Do not execute other configuration steps if building source only. if (GALERA_SOURCE) return() endif() # Source compiler/OS settings first as it sets used C/C++ standards, # common compiler flags and include/link paths. include(cmake/compiler.cmake) include(cmake/alignment.cmake) include(cmake/os.cmake) # Instrumentation. include(cmake/asan.cmake) # Common definitions for all modues. include(cmake/common.cmake) # Libraries, language library features. 
include(cmake/ssl.cmake) include(cmake/asio.cmake) include(cmake/array.cmake) include(cmake/custom_boost.cmake) include(cmake/boost.cmake) include(cmake/crc32c.cmake) include(cmake/endian.cmake) include(cmake/shared_ptr.cmake) include(cmake/unordered.cmake) include(cmake/check.cmake) include(cmake/memorycheck.cmake) include(cmake/coverage.cmake) # Keep the maintainer mode as last to avoid feature checks failing # in compiler warnings. include(cmake/maintainer_mode.cmake) include(CTest) enable_testing() add_subdirectory(galerautils) add_subdirectory(gcomm) add_subdirectory(gcache) add_subdirectory(gcs) add_subdirectory(garb) add_subdirectory(galera) add_subdirectory(scripts/packages) add_subdirectory(wsrep/tests) if (NOT ${CMAKE_SYSTEM_NAME} MATCHES ".*BSD") install(FILES ${PROJECT_SOURCE_DIR}/AUTHORS ${PROJECT_SOURCE_DIR}/COPYING ${PROJECT_SOURCE_DIR}/README DESTINATION doc) install(FILES ${PROJECT_SOURCE_DIR}/asio/LICENSE_1_0.txt DESTINATION doc RENAME LICENSE.asio) endif() message(STATUS "") message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") get_directory_property(DirDefs COMPILE_DEFINITIONS) message(STATUS "COMPILE_DEFINITIONS: ${DirDefs}") message(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}") message(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}") message(STATUS "CMAKE_EXE_LINKER_FLAGS: ${CMAKE_EXE_LINKER_FLAGS}") message(STATUS "GALERA_SYSTEM_LIBS: ${GALERA_SYSTEM_LIBS}") message(STATUS "GALERA_UNIT_TEST_LIBS: ${GALERA_UNIT_TEST_LIBS}") message(STATUS "") galera-4-26.4.25/CONTRIBUTORS.txt000644 000164 177776 00000003225 15107057155 017214 0ustar00jenkinsnogroup000000 000000 All contributors are required to add their name and [Github username/email] to this file in connection with their first contribution. If you are making a contribution on behalf of a company, you should add the said company name. 
By adding your name and [Github username/email] to this file you agree that your contribution is a contribution under a contributor agreement between you and Codership Oy. To the extent that you are an employee of a company and contribute in that role, you confirm that your contribution is a contribution under the contribution license agreement between your employer and Codership Oy; and that you have the authorization to give such confirmation. You confirm that you have read, understood and signed the contributor license agreement applicable to you. For the individual contributor agreement see file CONTRIBUTOR_AGREEMENT.txt in the same directory as this file. Authors from Codership Oy: * Alexey Yurchenko , Codership Oy * Seppo Jaakola , Codership Oy * Teemu Ollakka , Codership Oy * Daniele Sciascia , Codership Oy * Philip Stoev , Codership Oy * Mario Karuza , Codership Oy [Codership employees, add name and email/username above this line, but leave this line intact] Other contributors: * Stefan Langenmaier * Christian Hesse * Andrzej Godziuk * Otto Kekäläinen [add name and email/username above this line, but leave this line intact] galera-4-26.4.25/asio/000755 000164 177776 00000000000 15107057160 015443 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/000755 000164 177776 00000000000 15107057160 016376 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/system_error.hpp000644 000164 177776 00000005501 15107057155 021651 0ustar00jenkinsnogroup000000 000000 // // system_error.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SYSTEM_ERROR_HPP #define ASIO_SYSTEM_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SYSTEM_ERROR) # include #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) # include # include # include # include "asio/error_code.hpp" # include "asio/detail/scoped_ptr.hpp" #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::system_error system_error; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// The system_error class is used to represent system conditions that /// prevent the library from operating correctly. class system_error : public std::exception { public: /// Construct with an error code. system_error(const error_code& ec) : code_(ec), context_() { } /// Construct with an error code and context. system_error(const error_code& ec, const std::string& context) : code_(ec), context_(context) { } /// Copy constructor. system_error(const system_error& other) : std::exception(other), code_(other.code_), context_(other.context_), what_() { } /// Destructor. virtual ~system_error() throw () { } /// Assignment operator. system_error& operator=(const system_error& e) { context_ = e.context_; code_ = e.code_; what_.reset(); return *this; } /// Get a string representation of the exception. virtual const char* what() const throw () { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { if (!what_.get()) { std::string tmp(context_); if (tmp.length()) tmp += ": "; tmp += code_.message(); what_.reset(new std::string(tmp)); } return what_->c_str(); } #if !defined(ASIO_NO_EXCEPTIONS) catch (std::exception&) { return "system_error"; } #endif // !defined(ASIO_NO_EXCEPTIONS) } /// Get the error code associated with the exception. 
error_code code() const { return code_; } private: // The code associated with the error. error_code code_; // The context associated with the error. std::string context_; // The string representation of the error. mutable asio::detail::scoped_ptr what_; }; #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SYSTEM_ERROR_HPP galera-4-26.4.25/asio/asio/basic_deadline_timer.hpp000644 000164 177776 00000054503 15107057155 023230 0ustar00jenkinsnogroup000000 000000 // // basic_deadline_timer.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_DEADLINE_TIMER_HPP #define ASIO_BASIC_DEADLINE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/deadline_timer_service.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/time_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides waitable timer functionality. /** * The basic_deadline_timer class template provides the ability to perform a * blocking or asynchronous wait for a timer to expire. * * A deadline timer is always in one of two states: "expired" or "not expired". * If the wait() or async_wait() function is called on an expired timer, the * wait operation will complete immediately. * * Most applications will use the asio::deadline_timer typedef. 
* * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Examples * Performing a blocking wait: * @code * // Construct a timer without setting an expiry time. * asio::deadline_timer timer(my_context); * * // Set an expiry time relative to now. * timer.expires_from_now(boost::posix_time::seconds(5)); * * // Wait for the timer to expire. * timer.wait(); * @endcode * * @par * Performing an asynchronous wait: * @code * void handler(const asio::error_code& error) * { * if (!error) * { * // Timer expired. * } * } * * ... * * // Construct a timer with an absolute expiry time. * asio::deadline_timer timer(my_context, * boost::posix_time::time_from_string("2005-12-07 23:59:59.000")); * * // Start an asynchronous wait. * timer.async_wait(handler); * @endcode * * @par Changing an active deadline_timer's expiry time * * Changing the expiry time of a timer while there are pending asynchronous * waits causes those wait operations to be cancelled. To ensure that the action * associated with the timer is performed only once, use something like this: * used: * * @code * void on_some_event() * { * if (my_timer.expires_from_now(seconds(5)) > 0) * { * // We managed to cancel the timer. Start new asynchronous wait. * my_timer.async_wait(on_timeout); * } * else * { * // Too late, timer has already expired! * } * } * * void on_timeout(const asio::error_code& e) * { * if (e != asio::error::operation_aborted) * { * // Timer was not cancelled, take necessary action. * } * } * @endcode * * @li The asio::basic_deadline_timer::expires_from_now() function * cancels any pending asynchronous waits, and returns the number of * asynchronous waits that were cancelled. If it returns 0 then you were too * late and the wait handler has already been executed, or will soon be * executed. If it returns 1 then the wait handler was successfully cancelled. 
* * @li If a wait handler is cancelled, the asio::error_code passed to * it contains the value asio::error::operation_aborted. */ template , typename Executor = executor> class basic_deadline_timer { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The time traits type. typedef TimeTraits traits_type; /// The time type. typedef typename traits_type::time_type time_type; /// The duration type. typedef typename traits_type::duration_type duration_type; /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_from_now() functions must be called to set an * expiry time before the timer can be waited on. * * @param ex The I/O executor that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. */ explicit basic_deadline_timer(const executor_type& ex) : impl_(ex) { } /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_from_now() functions must be called to set an * expiry time before the timer can be waited on. * * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. */ template explicit basic_deadline_timer(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. * * @param ex The I/O executor that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. 
*/ basic_deadline_timer(const executor_type& ex, const time_type& expiry_time) : impl_(ex) { asio::error_code ec; impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); } /// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. * * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. */ template basic_deadline_timer(ExecutionContext& context, const time_type& expiry_time, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); } /// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. * * @param ex The I/O executor that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. */ basic_deadline_timer(const executor_type& ex, const duration_type& expiry_time) : impl_(ex) { asio::error_code ec; impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); } /// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. * * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. 
*/ template basic_deadline_timer(ExecutionContext& context, const duration_type& expiry_time, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_deadline_timer from another. /** * This constructor moves a timer from one object to another. * * @param other The other basic_deadline_timer object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_deadline_timer(const executor_type&) * constructor. */ basic_deadline_timer(basic_deadline_timer&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_deadline_timer from another. /** * This assignment operator moves a timer from one object to another. Cancels * any outstanding asynchronous operations associated with the target object. * * @param other The other basic_deadline_timer object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_deadline_timer(const executor_type&) * constructor. */ basic_deadline_timer& operator=(basic_deadline_timer&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the timer. /** * This function destroys the timer, cancelling any outstanding asynchronous * wait operations associated with the timer as if by calling @c cancel. */ ~basic_deadline_timer() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Cancel any asynchronous operations that are waiting on the timer. 
/** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel() { asio::error_code ec; std::size_t s = impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); return s; } /// Cancel any asynchronous operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel(asio::error_code& ec) { return impl_.get_service().cancel(impl_.get_implementation(), ec); } /// Cancels one asynchronous operation that is waiting on the timer. 
/** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one() { asio::error_code ec; std::size_t s = impl_.get_service().cancel_one( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel_one"); return s; } /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t cancel_one(asio::error_code& ec) { return impl_.get_service().cancel_one(impl_.get_implementation(), ec); } /// Get the timer's expiry time as an absolute time. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ time_type expires_at() const { return impl_.get_service().expires_at(impl_.get_implementation()); } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_type& expiry_time) { asio::error_code ec; std::size_t s = impl_.get_service().expires_at( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); return s; } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. 
* * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_type& expiry_time, asio::error_code& ec) { return impl_.get_service().expires_at( impl_.get_implementation(), expiry_time, ec); } /// Get the timer's expiry time relative to now. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ duration_type expires_from_now() const { return impl_.get_service().expires_from_now(impl_.get_implementation()); } /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time) { asio::error_code ec; std::size_t s = impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); return s; } /// Set the timer's expiry time relative to now. 
/** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration_type& expiry_time, asio::error_code& ec) { return impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @throws asio::system_error Thrown on failure. */ void wait() { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @param ec Set to indicate what error occurred, if any. */ void wait(asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), ec); } /// Start an asynchronous wait on the timer. /** * This function may be used to initiate an asynchronous wait against the * timer. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li The timer has expired. 
* * @li The timer was cancelled, in which case the handler is passed the error * code asio::error::operation_aborted. * * @param handler The handler to be called when the timer expires. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { return async_initiate( initiate_async_wait(), handler, this); } private: // Disallow copying and assignment. basic_deadline_timer(const basic_deadline_timer&) ASIO_DELETED; basic_deadline_timer& operator=( const basic_deadline_timer&) ASIO_DELETED; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(WaitHandler) handler, basic_deadline_timer* self) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), handler2.value, self->impl_.get_implementation_executor()); } }; detail::io_object_impl< detail::deadline_timer_service, Executor> impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_BASIC_DEADLINE_TIMER_HPP galera-4-26.4.25/asio/asio/io_context_strand.hpp000644 000164 177776 00000031704 15107057155 022646 0ustar00jenkinsnogroup000000 000000 // // io_context_strand.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_CONTEXT_STRAND_HPP #define ASIO_IO_CONTEXT_STRAND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_EXTENSIONS) #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides serialised handler execution. /** * The io_context::strand class provides the ability to post and dispatch * handlers with the guarantee that none of those handlers will execute * concurrently. 
* * @par Order of handler invocation * Given: * * @li a strand object @c s * * @li an object @c a meeting completion handler requirements * * @li an object @c a1 which is an arbitrary copy of @c a made by the * implementation * * @li an object @c b meeting completion handler requirements * * @li an object @c b1 which is an arbitrary copy of @c b made by the * implementation * * if any of the following conditions are true: * * @li @c s.post(a) happens-before @c s.post(b) * * @li @c s.post(a) happens-before @c s.dispatch(b), where the latter is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.post(b), where the former is * performed outside the strand * * @li @c s.dispatch(a) happens-before @c s.dispatch(b), where both are * performed outside the strand * * then @c asio_handler_invoke(a1, &a1) happens-before * @c asio_handler_invoke(b1, &b1). * * Note that in the following case: * @code async_op_1(..., s.wrap(a)); * async_op_2(..., s.wrap(b)); @endcode * the completion of the first async operation will perform @c s.dispatch(a), * and the second will perform @c s.dispatch(b), but the order in which those * are performed is unspecified. That is, you cannot state whether one * happens-before the other. Therefore none of the above conditions are met and * no ordering guarantee is made. * * @note The implementation makes no guarantee that handlers posted or * dispatched through different @c strand objects will be invoked concurrently. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Dispatcher. */ class io_context::strand { public: /// Constructor. /** * Constructs the strand. * * @param io_context The io_context object that the strand will use to * dispatch handlers that are ready to be run. */ explicit strand(asio::io_context& io_context) : service_(asio::use_service< asio::detail::strand_service>(io_context)) { service_.construct(impl_); } /// Destructor. /** * Destroys a strand. 
* * Handlers posted through the strand that have not yet been invoked will * still be dispatched in a way that meets the guarantee of non-concurrency. */ ~strand() { } /// Obtain the underlying execution context. asio::io_context& context() const ASIO_NOEXCEPT { return service_.get_io_context(); } /// Inform the strand that it has some outstanding work to do. /** * The strand delegates this call to its underlying io_context. */ void on_work_started() const ASIO_NOEXCEPT { context().get_executor().on_work_started(); } /// Inform the strand that some work is no longer outstanding. /** * The strand delegates this call to its underlying io_context. */ void on_work_finished() const ASIO_NOEXCEPT { context().get_executor().on_work_finished(); } /// Request the strand to invoke the given function object. /** * This function is used to ask the strand to execute the given function * object on its underlying io_context. The function object will be executed * inside this function if the strand is not otherwise busy and if the * underlying io_context's executor's @c dispatch() function is also able to * execute the function before returning. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typename decay::type tmp(ASIO_MOVE_CAST(Function)(f)); service_.dispatch(impl_, tmp); (void)a; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use asio::dispatch().) Request the strand to invoke /// the given handler. /** * This function is used to ask the strand to execute the given handler. * * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. 
The handler may be executed * inside this function if the guarantee can be met. If this function is * called from within a handler that was posted or dispatched through the same * strand, then the new handler will be executed immediately. * * The strand's guarantee is in addition to the guarantee provided by the * underlying io_context. The io_context guarantees that the handler will only * be called in a thread in which the io_context's run member function is * currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler) { return async_initiate( initiate_dispatch(), handler, this); } #endif // !defined(ASIO_NO_DEPRECATED) /// Request the strand to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled to run by the underlying io_context. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typename decay::type tmp(ASIO_MOVE_CAST(Function)(f)); service_.post(impl_, tmp); (void)a; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use asio::post().) Request the strand to invoke the /// given handler and return immediately. /** * This function is used to ask the strand to execute the given handler, but * without allowing the strand to call the handler from inside this function. 
* * The strand object guarantees that handlers posted or dispatched through * the strand will not be executed concurrently. The strand's guarantee is in * addition to the guarantee provided by the underlying io_context. The * io_context guarantees that the handler will only be called in a thread in * which the io_context's run member function is currently being invoked. * * @param handler The handler to be called. The strand will make a copy of the * handler object as required. The function signature of the handler must be: * @code void handler(); @endcode */ template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler) { return async_initiate( initiate_post(), handler, this); } #endif // !defined(ASIO_NO_DEPRECATED) /// Request the strand to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled to run by the underlying io_context. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typename decay::type tmp(ASIO_MOVE_CAST(Function)(f)); service_.post(impl_, tmp); (void)a; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use asio::bind_executor().) Create a new handler that /// automatically dispatches the wrapped handler on the strand. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the strand's * dispatch function. * * @param handler The handler to be wrapped. 
The strand will make a copy of * the handler object as required. The function signature of the handler must * be: @code void handler(A1 a1, ... An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the strand's dispatch function. Given a function object with the signature: * @code R f(A1 a1, ... An an); @endcode * If this function object is passed to the wrap function like so: * @code strand.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code strand.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler) { return detail::wrapped_handler(*this, handler); } #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the strand is running in the current thread. /** * @return @c true if the current thread is executing a handler that was * submitted to the strand using post(), dispatch() or wrap(). Otherwise * returns @c false. */ bool running_in_this_thread() const ASIO_NOEXCEPT { return service_.running_in_this_thread(impl_); } /// Compare two strands for equality. /** * Two strands are equal if they refer to the same ordered, non-concurrent * state. */ friend bool operator==(const strand& a, const strand& b) ASIO_NOEXCEPT { return a.impl_ == b.impl_; } /// Compare two strands for inequality. /** * Two strands are equal if they refer to the same ordered, non-concurrent * state. 
*/ friend bool operator!=(const strand& a, const strand& b) ASIO_NOEXCEPT { return a.impl_ != b.impl_; } private: #if !defined(ASIO_NO_DEPRECATED) struct initiate_dispatch { template void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler, strand* self) const { // If you get an error on the following line it means that your // handler does not meet the documented type requirements for a // LegacyCompletionHandler. ASIO_LEGACY_COMPLETION_HANDLER_CHECK( LegacyCompletionHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->service_.dispatch(self->impl_, handler2.value); } }; struct initiate_post { template void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler, strand* self) const { // If you get an error on the following line it means that your // handler does not meet the documented type requirements for a // LegacyCompletionHandler. ASIO_LEGACY_COMPLETION_HANDLER_CHECK( LegacyCompletionHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->service_.post(self->impl_, handler2.value); } }; #endif // !defined(ASIO_NO_DEPRECATED) asio::detail::strand_service& service_; mutable asio::detail::strand_service::implementation_type impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #endif // ASIO_IO_CONTEXT_STRAND_HPP galera-4-26.4.25/asio/asio/basic_io_object.hpp000644 000164 177776 00000017300 15107057155 022212 0ustar00jenkinsnogroup000000 000000 // // basic_io_object.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_IO_OBJECT_HPP #define ASIO_BASIC_IO_OBJECT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_MOVE) namespace detail { // Type trait used to determine whether a service supports move. template class service_has_move { private: typedef IoObjectService service_type; typedef typename service_type::implementation_type implementation_type; template static auto asio_service_has_move_eval(T* t, U* u) -> decltype(t->move_construct(*u, *u), char()); static char (&asio_service_has_move_eval(...))[2]; public: static const bool value = sizeof(asio_service_has_move_eval( static_cast(0), static_cast(0))) == 1; }; } #endif // defined(ASIO_HAS_MOVE) /// Base class for all I/O objects. /** * @note All I/O objects are non-copyable. However, when using C++0x, certain * I/O objects do support move construction and move assignment. */ #if !defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) template #else template ::value> #endif class basic_io_object { public: /// The type of the service that will be used to provide I/O operations. typedef IoObjectService service_type; /// The underlying implementation type of I/O object. typedef typename service_type::implementation_type implementation_type; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use get_executor().) Get the io_context associated with the /// object. /** * This function may be used to obtain the io_context object that the I/O * object uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_context object that the I/O object will use * to dispatch handlers. Ownership is not transferred to the caller. 
*/ asio::io_context& get_io_context() { return service_.get_io_context(); } /// (Deprecated: Use get_executor().) Get the io_context associated with the /// object. /** * This function may be used to obtain the io_context object that the I/O * object uses to dispatch handlers for asynchronous operations. * * @return A reference to the io_context object that the I/O object will use * to dispatch handlers. Ownership is not transferred to the caller. */ asio::io_context& get_io_service() { return service_.get_io_context(); } #endif // !defined(ASIO_NO_DEPRECATED) /// The type of the executor associated with the object. typedef asio::io_context::executor_type executor_type; /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return service_.get_io_context().get_executor(); } protected: /// Construct a basic_io_object. /** * Performs: * @code get_service().construct(get_implementation()); @endcode */ explicit basic_io_object(asio::io_context& io_context) : service_(asio::use_service(io_context)) { service_.construct(implementation_); } #if defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_io_object. /** * Performs: * @code get_service().move_construct( * get_implementation(), other.get_implementation()); @endcode * * @note Available only for services that support movability, */ basic_io_object(basic_io_object&& other); /// Move-assign a basic_io_object. /** * Performs: * @code get_service().move_assign(get_implementation(), * other.get_service(), other.get_implementation()); @endcode * * @note Available only for services that support movability, */ basic_io_object& operator=(basic_io_object&& other); /// Perform a converting move-construction of a basic_io_object. template basic_io_object(IoObjectService1& other_service, typename IoObjectService1::implementation_type& other_implementation); #endif // defined(GENERATING_DOCUMENTATION) /// Protected destructor to prevent deletion through this type. 
/** * Performs: * @code get_service().destroy(get_implementation()); @endcode */ ~basic_io_object() { service_.destroy(implementation_); } /// Get the service associated with the I/O object. service_type& get_service() { return service_; } /// Get the service associated with the I/O object. const service_type& get_service() const { return service_; } /// Get the underlying implementation of the I/O object. implementation_type& get_implementation() { return implementation_; } /// Get the underlying implementation of the I/O object. const implementation_type& get_implementation() const { return implementation_; } private: basic_io_object(const basic_io_object&); basic_io_object& operator=(const basic_io_object&); // The service associated with the I/O object. service_type& service_; /// The underlying implementation of the I/O object. implementation_type implementation_; }; #if defined(ASIO_HAS_MOVE) // Specialisation for movable objects. template class basic_io_object { public: typedef IoObjectService service_type; typedef typename service_type::implementation_type implementation_type; #if !defined(ASIO_NO_DEPRECATED) asio::io_context& get_io_context() { return service_->get_io_context(); } asio::io_context& get_io_service() { return service_->get_io_context(); } #endif // !defined(ASIO_NO_DEPRECATED) typedef asio::io_context::executor_type executor_type; executor_type get_executor() ASIO_NOEXCEPT { return service_->get_io_context().get_executor(); } protected: explicit basic_io_object(asio::io_context& io_context) : service_(&asio::use_service(io_context)) { service_->construct(implementation_); } basic_io_object(basic_io_object&& other) : service_(&other.get_service()) { service_->move_construct(implementation_, other.implementation_); } template basic_io_object(IoObjectService1& other_service, typename IoObjectService1::implementation_type& other_implementation) : service_(&asio::use_service( other_service.get_io_context())) { 
service_->converting_move_construct(implementation_, other_service, other_implementation); } ~basic_io_object() { service_->destroy(implementation_); } basic_io_object& operator=(basic_io_object&& other) { service_->move_assign(implementation_, *other.service_, other.implementation_); service_ = other.service_; return *this; } service_type& get_service() { return *service_; } const service_type& get_service() const { return *service_; } implementation_type& get_implementation() { return implementation_; } const implementation_type& get_implementation() const { return implementation_; } private: basic_io_object(const basic_io_object&); void operator=(const basic_io_object&); IoObjectService* service_; implementation_type implementation_; }; #endif // defined(ASIO_HAS_MOVE) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_IO_OBJECT_HPP galera-4-26.4.25/asio/asio/system_timer.hpp000644 000164 177776 00000002303 15107057155 021635 0ustar00jenkinsnogroup000000 000000 // // system_timer.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SYSTEM_TIMER_HPP #define ASIO_SYSTEM_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #include "asio/basic_waitable_timer.hpp" #include "asio/detail/chrono.hpp" namespace asio { /// Typedef for a timer based on the system clock. /** * This typedef uses the C++11 @c <chrono> standard library facility, if * available. Otherwise, it may use the Boost.Chrono library. 
To explicitly * utilise Boost.Chrono, use the basic_waitable_timer template directly: * @code * typedef basic_waitable_timer timer; * @endcode */ typedef basic_waitable_timer system_timer; } // namespace asio #endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SYSTEM_TIMER_HPP galera-4-26.4.25/asio/asio/executor_work_guard.hpp000644 000164 177776 00000011616 15107057155 023202 0ustar00jenkinsnogroup000000 000000 // // executor_work_guard.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_EXECUTOR_WORK_GUARD_HPP #define ASIO_EXECUTOR_WORK_GUARD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/type_traits.hpp" #include "asio/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// An object of type @c executor_work_guard controls ownership of executor work /// within a scope. template class executor_work_guard { public: /// The underlying executor type. typedef Executor executor_type; /// Constructs a @c executor_work_guard object for the specified executor. /** * Stores a copy of @c e and calls on_work_started() on it. */ explicit executor_work_guard(const executor_type& e) ASIO_NOEXCEPT : executor_(e), owns_(true) { executor_.on_work_started(); } /// Copy constructor. executor_work_guard(const executor_work_guard& other) ASIO_NOEXCEPT : executor_(other.executor_), owns_(other.owns_) { if (owns_) executor_.on_work_started(); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. 
executor_work_guard(executor_work_guard&& other) ASIO_NOEXCEPT : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)), owns_(other.owns_) { other.owns_ = false; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. /** * Unless the object has already been reset, or is in a moved-from state, * calls on_work_finished() on the stored executor. */ ~executor_work_guard() { if (owns_) executor_.on_work_finished(); } /// Obtain the associated executor. executor_type get_executor() const ASIO_NOEXCEPT { return executor_; } /// Whether the executor_work_guard object owns some outstanding work. bool owns_work() const ASIO_NOEXCEPT { return owns_; } /// Indicate that the work is no longer outstanding. /* * Unless the object has already been reset, or is in a moved-from state, * calls on_work_finished() on the stored executor. */ void reset() ASIO_NOEXCEPT { if (owns_) { executor_.on_work_finished(); owns_ = false; } } private: // Disallow assignment. executor_work_guard& operator=(const executor_work_guard&); executor_type executor_; bool owns_; }; /// Create an @ref executor_work_guard object. template inline executor_work_guard make_work_guard(const Executor& ex, typename enable_if::value>::type* = 0) { return executor_work_guard(ex); } /// Create an @ref executor_work_guard object. template inline executor_work_guard make_work_guard(ExecutionContext& ctx, typename enable_if< is_convertible::value>::type* = 0) { return executor_work_guard( ctx.get_executor()); } /// Create an @ref executor_work_guard object. template inline executor_work_guard::type> make_work_guard(const T& t, typename enable_if::value && !is_convertible::value>::type* = 0) { return executor_work_guard::type>( associated_executor::get(t)); } /// Create an @ref executor_work_guard object. 
template inline executor_work_guard::type> make_work_guard(const T& t, const Executor& ex, typename enable_if::value>::type* = 0) { return executor_work_guard::type>( associated_executor::get(t, ex)); } /// Create an @ref executor_work_guard object. template inline executor_work_guard::type> make_work_guard(const T& t, ExecutionContext& ctx, typename enable_if::value && !is_convertible::value && is_convertible::value>::type* = 0) { return executor_work_guard::type>( associated_executor::get( t, ctx.get_executor())); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_EXECUTOR_WORK_GUARD_HPP galera-4-26.4.25/asio/asio/coroutine.hpp000644 000164 177776 00000023002 15107057155 021117 0ustar00jenkinsnogroup000000 000000 // // coroutine.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COROUTINE_HPP #define ASIO_COROUTINE_HPP namespace asio { namespace detail { class coroutine_ref; } // namespace detail /// Provides support for implementing stackless coroutines. /** * The @c coroutine class may be used to implement stackless coroutines. The * class itself is used to store the current state of the coroutine. * * Coroutines are copy-constructible and assignable, and the space overhead is * a single int. They can be used as a base class: * * @code class session : coroutine * { * ... * }; @endcode * * or as a data member: * * @code class session * { * ... * coroutine coro_; * }; @endcode * * or even bound in as a function argument using lambdas or @c bind(). The * important thing is that as the application maintains a copy of the object * for as long as the coroutine must be kept alive. * * @par Pseudo-keywords * * A coroutine is used in conjunction with certain "pseudo-keywords", which * are implemented as macros. 
These macros are defined by a header file: * * @code #include @endcode * * and may conversely be undefined as follows: * * @code #include @endcode * * reenter * * The @c reenter macro is used to define the body of a coroutine. It takes a * single argument: a pointer or reference to a coroutine object. For example, * if the base class is a coroutine object you may write: * * @code reenter (this) * { * ... coroutine body ... * } @endcode * * and if a data member or other variable you can write: * * @code reenter (coro_) * { * ... coroutine body ... * } @endcode * * When @c reenter is executed at runtime, control jumps to the location of the * last @c yield or @c fork. * * The coroutine body may also be a single statement, such as: * * @code reenter (this) for (;;) * { * ... * } @endcode * * @b Limitation: The @c reenter macro is implemented using a switch. This * means that you must take care when using local variables within the * coroutine body. The local variable is not allowed in a position where * reentering the coroutine could bypass the variable definition. * * yield statement * * This form of the @c yield keyword is often used with asynchronous operations: * * @code yield socket_->async_read_some(buffer(*buffer_), *this); @endcode * * This divides into four logical steps: * * @li @c yield saves the current state of the coroutine. * @li The statement initiates the asynchronous operation. * @li The resume point is defined immediately following the statement. * @li Control is transferred to the end of the coroutine body. * * When the asynchronous operation completes, the function object is invoked * and @c reenter causes control to transfer to the resume point. It is * important to remember to carry the coroutine state forward with the * asynchronous operation. In the above snippet, the current class is a * function object object with a coroutine object as base class or data member. 
* * The statement may also be a compound statement, and this permits us to * define local variables with limited scope: * * @code yield * { * mutable_buffers_1 b = buffer(*buffer_); * socket_->async_read_some(b, *this); * } @endcode * * yield return expression ; * * This form of @c yield is often used in generators or coroutine-based parsers. * For example, the function object: * * @code struct interleave : coroutine * { * istream& is1; * istream& is2; * char operator()(char c) * { * reenter (this) for (;;) * { * yield return is1.get(); * yield return is2.get(); * } * } * }; @endcode * * defines a trivial coroutine that interleaves the characters from two input * streams. * * This type of @c yield divides into three logical steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li The value of the expression is returned from the function. * * yield ; * * This form of @c yield is equivalent to the following steps: * * @li @c yield saves the current state of the coroutine. * @li The resume point is defined immediately following the semicolon. * @li Control is transferred to the end of the coroutine body. * * This form might be applied when coroutines are used for cooperative * threading and scheduling is explicitly managed. For example: * * @code struct task : coroutine * { * ... * void operator()() * { * reenter (this) * { * while (... not finished ...) * { * ... do something ... * yield; * ... do some more ... * yield; * } * } * } * ... * }; * ... * task t1, t2; * for (;;) * { * t1(); * t2(); * } @endcode * * yield break ; * * The final form of @c yield is used to explicitly terminate the coroutine. * This form is comprised of two steps: * * @li @c yield sets the coroutine state to indicate termination. * @li Control is transferred to the end of the coroutine body. * * Once terminated, calls to is_complete() return true and the coroutine cannot * be reentered. 
* * Note that a coroutine may also be implicitly terminated if the coroutine * body is exited without a yield, e.g. by return, throw or by running to the * end of the body. * * fork statement * * The @c fork pseudo-keyword is used when "forking" a coroutine, i.e. splitting * it into two (or more) copies. One use of @c fork is in a server, where a new * coroutine is created to handle each client connection: * * @code reenter (this) * { * do * { * socket_.reset(new tcp::socket(my_context_)); * yield acceptor->async_accept(*socket_, *this); * fork server(*this)(); * } while (is_parent()); * ... client-specific handling follows ... * } @endcode * * The logical steps involved in a @c fork are: * * @li @c fork saves the current state of the coroutine. * @li The statement creates a copy of the coroutine and either executes it * immediately or schedules it for later execution. * @li The resume point is defined immediately following the semicolon. * @li For the "parent", control immediately continues from the next line. * * The functions is_parent() and is_child() can be used to differentiate * between parent and child. You would use these functions to alter subsequent * control flow. * * Note that @c fork doesn't do the actual forking by itself. It is the * application's responsibility to create a clone of the coroutine and call it. * The clone can be called immediately, as above, or scheduled for delayed * execution using something like asio::post(). * * @par Alternate macro names * * If preferred, an application can use macro names that follow a more typical * naming convention, rather than the pseudo-keywords. These are: * * @li @c ASIO_CORO_REENTER instead of @c reenter * @li @c ASIO_CORO_YIELD instead of @c yield * @li @c ASIO_CORO_FORK instead of @c fork */ class coroutine { public: /// Constructs a coroutine in its initial state. coroutine() : value_(0) {} /// Returns true if the coroutine is the child of a fork. 
bool is_child() const { return value_ < 0; } /// Returns true if the coroutine is the parent of a fork. bool is_parent() const { return !is_child(); } /// Returns true if the coroutine has reached its terminal state. bool is_complete() const { return value_ == -1; } private: friend class detail::coroutine_ref; int value_; }; namespace detail { class coroutine_ref { public: coroutine_ref(coroutine& c) : value_(c.value_), modified_(false) {} coroutine_ref(coroutine* c) : value_(c->value_), modified_(false) {} ~coroutine_ref() { if (!modified_) value_ = -1; } operator int() const { return value_; } int& operator=(int v) { modified_ = true; return value_ = v; } private: void operator=(const coroutine_ref&); int& value_; bool modified_; }; } // namespace detail } // namespace asio #define ASIO_CORO_REENTER(c) \ switch (::asio::detail::coroutine_ref _coro_value = c) \ case -1: if (_coro_value) \ { \ goto terminate_coroutine; \ terminate_coroutine: \ _coro_value = -1; \ goto bail_out_of_coroutine; \ bail_out_of_coroutine: \ break; \ } \ else /* fall-through */ case 0: #define ASIO_CORO_YIELD_IMPL(n) \ for (_coro_value = (n);;) \ if (_coro_value == 0) \ { \ case (n): ; \ break; \ } \ else \ switch (_coro_value ? 
0 : 1) \ for (;;) \ /* fall-through */ case -1: if (_coro_value) \ goto terminate_coroutine; \ else for (;;) \ /* fall-through */ case 1: if (_coro_value) \ goto bail_out_of_coroutine; \ else /* fall-through */ case 0: #define ASIO_CORO_FORK_IMPL(n) \ for (_coro_value = -(n);; _coro_value = (n)) \ if (_coro_value == (n)) \ { \ case -(n): ; \ break; \ } \ else #if defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__COUNTER__ + 1) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__COUNTER__ + 1) #else // defined(_MSC_VER) # define ASIO_CORO_YIELD ASIO_CORO_YIELD_IMPL(__LINE__) # define ASIO_CORO_FORK ASIO_CORO_FORK_IMPL(__LINE__) #endif // defined(_MSC_VER) #endif // ASIO_COROUTINE_HPP galera-4-26.4.25/asio/asio/associated_allocator.hpp000644 000164 177776 00000007266 15107057155 023305 0ustar00jenkinsnogroup000000 000000 // // associated_allocator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ASSOCIATED_ALLOCATOR_HPP #define ASIO_ASSOCIATED_ALLOCATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct associated_allocator_check { typedef void type; }; template struct associated_allocator_impl { typedef E type; static type get(const T&, const E& e) ASIO_NOEXCEPT { return e; } }; template struct associated_allocator_impl::type> { typedef typename T::allocator_type type; static type get(const T& t, const E&) ASIO_NOEXCEPT { return t.get_allocator(); } }; } // namespace detail /// Traits type used to obtain the allocator associated with an object. 
/** * A program may specialise this traits type if the @c T template parameter in * the specialisation is a user-defined type. The template parameter @c * Allocator shall be a type meeting the Allocator requirements. * * Specialisations shall meet the following requirements, where @c t is a const * reference to an object of type @c T, and @c a is an object of type @c * Allocator. * * @li Provide a nested typedef @c type that identifies a type meeting the * Allocator requirements. * * @li Provide a noexcept static member function named @c get, callable as @c * get(t) and with return type @c type. * * @li Provide a noexcept static member function named @c get, callable as @c * get(t,a) and with return type @c type. */ template > struct associated_allocator { /// If @c T has a nested type @c allocator_type, T::allocator_type. /// Otherwise @c Allocator. #if defined(GENERATING_DOCUMENTATION) typedef see_below type; #else // defined(GENERATING_DOCUMENTATION) typedef typename detail::associated_allocator_impl::type type; #endif // defined(GENERATING_DOCUMENTATION) /// If @c T has a nested type @c allocator_type, returns /// t.get_allocator(). Otherwise returns @c a. static type get(const T& t, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return detail::associated_allocator_impl::get(t, a); } }; /// Helper function to obtain an object's associated allocator. /** * @returns associated_allocator::get(t) */ template inline typename associated_allocator::type get_associated_allocator(const T& t) ASIO_NOEXCEPT { return associated_allocator::get(t); } /// Helper function to obtain an object's associated allocator. 
/** * @returns associated_allocator::get(t, a) */ template inline typename associated_allocator::type get_associated_allocator(const T& t, const Allocator& a) ASIO_NOEXCEPT { return associated_allocator::get(t, a); } #if defined(ASIO_HAS_ALIAS_TEMPLATES) template > using associated_allocator_t = typename associated_allocator::type; #endif // defined(ASIO_HAS_ALIAS_TEMPLATES) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_ASSOCIATED_ALLOCATOR_HPP galera-4-26.4.25/asio/asio/connect.hpp000644 000164 177776 00000117772 15107057155 020563 0ustar00jenkinsnogroup000000 000000 // // connect.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_CONNECT_HPP #define ASIO_CONNECT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/basic_socket.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { char (&has_iterator_helper(...))[2]; template char has_iterator_helper(T*, typename T::iterator* = 0); template struct has_iterator_typedef { enum { value = (sizeof((has_iterator_helper)((T*)(0))) == 1) }; }; } // namespace detail /// Type trait used to determine whether a type is an endpoint sequence that can /// be used with with @c connect and @c async_connect. template struct is_endpoint_sequence { #if defined(GENERATING_DOCUMENTATION) /// The value member is true if the type may be used as an endpoint sequence. 
static const bool value; #else enum { value = detail::has_iterator_typedef::value }; #endif }; /** * @defgroup connect asio::connect * * @brief The @c connect function is a composed operation that establishes a * socket connection by trying each endpoint in a sequence. */ /*@{*/ /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param endpoints A sequence of endpoints. * * @returns The successfully connected endpoint. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. * * @par Example * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * asio::connect(s, r.resolve(q)); @endcode */ template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, typename enable_if::value>::type* = 0); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param endpoints A sequence of endpoints. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. 
* * @returns On success, the successfully connected endpoint. Otherwise, a * default-constructed endpoint. * * @par Example * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * asio::error_code ec; * asio::connect(s, r.resolve(q), ec); * if (ec) * { * // An error occurred. * } @endcode */ template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, asio::error_code& ec, typename enable_if::value>::type* = 0); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use range overload.) Establishes a socket connection by trying /// each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. */ template Iterator connect(basic_socket& s, Iterator begin, typename enable_if::value>::type* = 0); /// (Deprecated: Use range overload.) Establishes a socket connection by trying /// each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. */ template Iterator connect(basic_socket& s, Iterator begin, asio::error_code& ec, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_DEPRECATED) /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @returns An iterator denoting the successfully connected endpoint. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. 
* * @par Example * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::resolver::results_type e = r.resolve(q); * tcp::socket s(my_context); * asio::connect(s, e.begin(), e.end()); @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @par Example * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::resolver::results_type e = r.resolve(q); * tcp::socket s(my_context); * asio::error_code ec; * asio::connect(s, e.begin(), e.end(), ec); * if (ec) * { * // An error occurred. * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, asio::error_code& ec); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. 
* * @param endpoints A sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @returns The successfully connected endpoint. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * tcp::endpoint e = asio::connect(s, * r.resolve(q), my_connect_condition()); * std::cout << "Connected to: " << e << std::endl; @endcode */ template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, typename enable_if::value>::type* = 0); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param endpoints A sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, the successfully connected endpoint. Otherwise, a * default-constructed endpoint. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * asio::error_code ec; * tcp::endpoint e = asio::connect(s, * r.resolve(q), my_connect_condition(), ec); * if (ec) * { * // An error occurred. 
* } * else * { * std::cout << "Connected to: " << e << std::endl; * } @endcode */ template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, asio::error_code& ec, typename enable_if::value>::type* = 0); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use range overload.) Establishes a socket connection by trying /// each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. 
*/ template Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, typename enable_if::value>::type* = 0); /// (Deprecated: Use range overload.) Establishes a socket connection by trying /// each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. 
*/ template Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, asio::error_code& ec, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_DEPRECATED) /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @returns An iterator denoting the successfully connected endpoint. * * @throws asio::system_error Thrown on failure. If the sequence is * empty, the associated @c error_code is asio::error::not_found. * Otherwise, contains the error from the last connection attempt. 
* * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::resolver::results_type e = r.resolve(q); * tcp::socket s(my_context); * tcp::resolver::results_type::iterator i = asio::connect( * s, e.begin(), e.end(), my_connect_condition()); * std::cout << "Connected to: " << i->endpoint() << std::endl; @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition); /// Establishes a socket connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c connect member * function, once for each endpoint in the sequence, until a connection is * successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. 
* The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param ec Set to indicate what error occurred, if any. If the sequence is * empty, set to asio::error::not_found. Otherwise, contains the error * from the last connection attempt. * * @returns On success, an iterator denoting the successfully connected * endpoint. Otherwise, the end iterator. * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::resolver::results_type e = r.resolve(q); * tcp::socket s(my_context); * asio::error_code ec; * tcp::resolver::results_type::iterator i = asio::connect( * s, e.begin(), e.end(), my_connect_condition()); * if (ec) * { * // An error occurred. * } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } @endcode */ template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, asio::error_code& ec); /*@}*/ /** * @defgroup async_connect asio::async_connect * * @brief The @c async_connect function is a composed asynchronous operation * that establishes a socket connection by trying each endpoint in a sequence. */ /*@{*/ /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param endpoints A sequence of endpoints. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, the successfully connected endpoint. * // Otherwise, a default-constructed endpoint. * const typename Protocol::endpoint& endpoint * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::results_type results) * { * if (!ec) * { * asio::async_connect(s, results, connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * const tcp::endpoint& endpoint) * { * // ... * } @endcode */ template ASIO_INITFN_RESULT_TYPE(RangeConnectHandler, void (asio::error_code, typename Protocol::endpoint)) async_connect(basic_socket& s, const EndpointSequence& endpoints, ASIO_MOVE_ARG(RangeConnectHandler) handler, typename enable_if::value>::type* = 0); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use range overload.) 
Asynchronously establishes a socket /// connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. */ template ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ASIO_MOVE_ARG(IteratorConnectHandler) handler, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_DEPRECATED) /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code std::vector endpoints = ...; * tcp::socket s(my_context); * asio::async_connect(s, * endpoints.begin(), endpoints.end(), * connect_handler); * * // ... * * void connect_handler( * const asio::error_code& ec, * std::vector::iterator i) * { * // ... * } @endcode */ template ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ASIO_MOVE_ARG(IteratorConnectHandler) handler); /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. 
It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param endpoints A sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::results_type results) * { * if (!ec) * { * asio::async_connect(s, results, * my_connect_condition(), * connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * const tcp::endpoint& endpoint) * { * if (ec) * { * // An error occurred. * } * else * { * std::cout << "Connected to: " << endpoint << std::endl; * } * } @endcode */ template ASIO_INITFN_RESULT_TYPE(RangeConnectHandler, void (asio::error_code, typename Protocol::endpoint)) async_connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, ASIO_MOVE_ARG(RangeConnectHandler) handler, typename enable_if::value>::type* = 0); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use range overload.) Asynchronously establishes a socket /// connection by trying each endpoint in a sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. 
* * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload assumes that a default constructed object of type @c * Iterator represents the end of the sequence. This is a valid assumption for * iterator types such as @c asio::ip::tcp::resolver::iterator. 
*/ template ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, ASIO_MOVE_ARG(IteratorConnectHandler) handler, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_DEPRECATED) /// Asynchronously establishes a socket connection by trying each endpoint in a /// sequence. /** * This function attempts to connect a socket to one of a sequence of * endpoints. It does this by repeated calls to the socket's @c async_connect * member function, once for each endpoint in the sequence, until a connection * is successfully established. * * @param s The socket to be connected. If the socket is already open, it will * be closed. * * @param begin An iterator pointing to the start of a sequence of endpoints. * * @param end An iterator pointing to the end of a sequence of endpoints. * * @param connect_condition A function object that is called prior to each * connection attempt. The signature of the function object must be: * @code bool connect_condition( * const asio::error_code& ec, * const typename Protocol::endpoint& next); @endcode * The @c ec parameter contains the result from the most recent connect * operation. Before the first connection attempt, @c ec is always set to * indicate success. The @c next parameter is the next endpoint to be tried. * The function object should return true if the next endpoint should be tried, * and false if it should be skipped. * * @param handler The handler to be called when the connect operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * // Result of operation. if the sequence is empty, set to * // asio::error::not_found. Otherwise, contains the * // error from the last connection attempt. * const asio::error_code& error, * * // On success, an iterator denoting the successfully * // connected endpoint. 
Otherwise, the end iterator. * Iterator iterator * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * The following connect condition function object can be used to output * information about the individual connection attempts: * @code struct my_connect_condition * { * bool operator()( * const asio::error_code& ec, * const::tcp::endpoint& next) * { * if (ec) std::cout << "Error: " << ec.message() << std::endl; * std::cout << "Trying: " << next << std::endl; * return true; * } * }; @endcode * It would be used with the asio::connect function as follows: * @code tcp::resolver r(my_context); * tcp::resolver::query q("host", "service"); * tcp::socket s(my_context); * * // ... * * r.async_resolve(q, resolve_handler); * * // ... * * void resolve_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (!ec) * { * tcp::resolver::iterator end; * asio::async_connect(s, i, end, * my_connect_condition(), * connect_handler); * } * } * * // ... * * void connect_handler( * const asio::error_code& ec, * tcp::resolver::iterator i) * { * if (ec) * { * // An error occurred. 
* } * else * { * std::cout << "Connected to: " << i->endpoint() << std::endl; * } * } @endcode */ template ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, ASIO_MOVE_ARG(IteratorConnectHandler) handler); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/connect.hpp" #endif galera-4-26.4.25/asio/asio/buffered_read_stream.hpp000644 000164 177776 00000016455 15107057155 023256 0ustar00jenkinsnogroup000000 000000 // // buffered_read_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_READ_STREAM_HPP #define ASIO_BUFFERED_READ_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffered_read_stream_fwd.hpp" #include "asio/buffer.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_resize_guard.hpp" #include "asio/detail/buffered_stream_storage.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the read-related operations of a stream. /** * The buffered_read_stream class template can be used to add buffering to the * synchronous and asynchronous read operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. 
*/ template class buffered_read_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the executor associated with the object. typedef typename lowest_layer_type::executor_type executor_type; #if defined(GENERATING_DOCUMENTATION) /// The default buffer size. static const std::size_t default_buffer_size = implementation_defined; #else ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024); #endif /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_read_stream(Arg& a) : next_layer_(a), storage_(default_buffer_size) { } /// Construct, passing the specified argument to initialise the next layer. template buffered_read_stream(Arg& a, std::size_t buffer_size) : next_layer_(a), storage_(buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return next_layer_.lowest_layer().get_executor(); } /// Close the stream. void close() { next_layer_.close(); } /// Close the stream. ASIO_SYNC_OP_VOID close(asio::error_code& ec) { next_layer_.close(ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers) { return next_layer_.write_some(buffers); } /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred. 
template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return next_layer_.write_some(buffers, ec); } /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return next_layer_.async_write_some(buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation. Throws an exception on failure. std::size_t fill(); /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation, or 0 if an error occurred. std::size_t fill(asio::error_code& ec); /// Start an asynchronous fill. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_fill(ASIO_MOVE_ARG(ReadHandler) handler); /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers); /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec); /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler); /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. template std::size_t peek(const MutableBufferSequence& buffers); /// Peek at the incoming data on the stream. 
Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec); /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return storage_.size(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { ec = asio::error_code(); return storage_.size(); } private: /// Copy data out of the internal buffer to the specified target buffer. /// Returns the number of bytes copied. template std::size_t copy(const MutableBufferSequence& buffers) { std::size_t bytes_copied = asio::buffer_copy( buffers, storage_.data(), storage_.size()); storage_.consume(bytes_copied); return bytes_copied; } /// Copy data from the internal buffer to the specified target buffer, without /// removing the data from the internal buffer. Returns the number of bytes /// copied. template std::size_t peek_copy(const MutableBufferSequence& buffers) { return asio::buffer_copy(buffers, storage_.data(), storage_.size()); } /// The next layer. Stream next_layer_; // The data in the buffer. detail::buffered_stream_storage storage_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/buffered_read_stream.hpp" #endif // ASIO_BUFFERED_READ_STREAM_HPP galera-4-26.4.25/asio/asio/impl/000755 000164 177776 00000000000 15107057160 017337 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/impl/error.ipp000644 000164 177776 00000005531 15107057155 021212 0ustar00jenkinsnogroup000000 000000 // // impl/error.ipp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_ERROR_IPP #define ASIO_IMPL_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) namespace detail { class netdb_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.netdb"; } std::string message(int value) const { if (value == error::host_not_found) return "Host not found (authoritative)"; if (value == error::host_not_found_try_again) return "Host not found (non-authoritative), try again later"; if (value == error::no_data) return "The query is valid, but it does not have associated data"; if (value == error::no_recovery) return "A non-recoverable error occurred during database lookup"; return "asio.netdb error"; } }; } // namespace detail const asio::error_category& get_netdb_category() { static detail::netdb_category instance; return instance; } namespace detail { class addrinfo_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.addrinfo"; } std::string message(int value) const { if (value == error::service_not_found) return "Service not found"; if (value == error::socket_type_not_supported) return "Socket type not supported"; return "asio.addrinfo error"; } }; } // namespace detail const asio::error_category& get_addrinfo_category() { static detail::addrinfo_category instance; return instance; } #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) namespace detail { class misc_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.misc"; } std::string message(int value) const { if (value == 
error::already_open) return "Already open"; if (value == error::eof) return "End of file"; if (value == error::not_found) return "Element not found"; if (value == error::fd_set_failure) return "The descriptor does not fit into the select call's fd_set"; return "asio.misc error"; } }; } // namespace detail const asio::error_category& get_misc_category() { static detail::misc_category instance; return instance; } } // namespace error } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_ERROR_IPP galera-4-26.4.25/asio/asio/impl/connect.hpp000644 000164 177776 00000066527 15107057155 021525 0ustar00jenkinsnogroup000000 000000 // // impl/connect.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_CONNECT_HPP #define ASIO_IMPL_CONNECT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/post.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct default_connect_condition { template bool operator()(const asio::error_code&, const Endpoint&) { return true; } }; template inline typename Protocol::endpoint deref_connect_result( Iterator iter, asio::error_code& ec) { return ec ? 
typename Protocol::endpoint() : *iter; } template struct legacy_connect_condition_helper : T { typedef char (*fallback_func_type)(...); operator fallback_func_type() const; }; template struct legacy_connect_condition_helper { R operator()(Arg1, Arg2) const; char operator()(...) const; }; template struct is_legacy_connect_condition { static char asio_connect_condition_check(char); static char (&asio_connect_condition_check(Iterator))[2]; static const bool value = sizeof(asio_connect_condition_check( (*static_cast*>(0))( *static_cast(0), *static_cast(0)))) != 1; }; template inline Iterator call_connect_condition(ConnectCondition& connect_condition, const asio::error_code& ec, Iterator next, Iterator end, typename enable_if::value>::type* = 0) { if (next != end) return connect_condition(ec, next); return end; } template inline Iterator call_connect_condition(ConnectCondition& connect_condition, const asio::error_code& ec, Iterator next, Iterator end, typename enable_if::value>::type* = 0) { for (;next != end; ++next) if (connect_condition(ec, *next)) return next; return end; } } template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, typename enable_if::value>::type*) { asio::error_code ec; typename Protocol::endpoint result = connect(s, endpoints, ec); asio::detail::throw_error(ec, "connect"); return result; } template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, asio::error_code& ec, typename enable_if::value>::type*) { return detail::deref_connect_result( connect(s, endpoints.begin(), endpoints.end(), detail::default_connect_condition(), ec), ec); } #if !defined(ASIO_NO_DEPRECATED) template Iterator connect(basic_socket& s, Iterator begin, typename enable_if::value>::type*) { asio::error_code ec; Iterator result = connect(s, begin, ec); asio::detail::throw_error(ec, "connect"); return result; } template inline Iterator connect(basic_socket& s, Iterator begin, asio::error_code& ec, 
typename enable_if::value>::type*) { return connect(s, begin, Iterator(), detail::default_connect_condition(), ec); } #endif // !defined(ASIO_NO_DEPRECATED) template Iterator connect(basic_socket& s, Iterator begin, Iterator end) { asio::error_code ec; Iterator result = connect(s, begin, end, ec); asio::detail::throw_error(ec, "connect"); return result; } template inline Iterator connect(basic_socket& s, Iterator begin, Iterator end, asio::error_code& ec) { return connect(s, begin, end, detail::default_connect_condition(), ec); } template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, typename enable_if::value>::type*) { asio::error_code ec; typename Protocol::endpoint result = connect( s, endpoints, connect_condition, ec); asio::detail::throw_error(ec, "connect"); return result; } template typename Protocol::endpoint connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, asio::error_code& ec, typename enable_if::value>::type*) { return detail::deref_connect_result( connect(s, endpoints.begin(), endpoints.end(), connect_condition, ec), ec); } #if !defined(ASIO_NO_DEPRECATED) template Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, typename enable_if::value>::type*) { asio::error_code ec; Iterator result = connect(s, begin, connect_condition, ec); asio::detail::throw_error(ec, "connect"); return result; } template inline Iterator connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, asio::error_code& ec, typename enable_if::value>::type*) { return connect(s, begin, Iterator(), connect_condition, ec); } #endif // !defined(ASIO_NO_DEPRECATED) template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition) { asio::error_code ec; Iterator result = connect(s, begin, end, connect_condition, ec); asio::detail::throw_error(ec, "connect"); return result; 
} template Iterator connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, asio::error_code& ec) { ec = asio::error_code(); for (Iterator iter = begin; iter != end; ++iter) { iter = (detail::call_connect_condition(connect_condition, ec, iter, end)); if (iter != end) { s.close(ec); s.connect(*iter, ec); if (!ec) return iter; } else break; } if (!ec) ec = asio::error::not_found; return end; } namespace detail { // Enable the empty base class optimisation for the connect condition. template class base_from_connect_condition { protected: explicit base_from_connect_condition( const ConnectCondition& connect_condition) : connect_condition_(connect_condition) { } template void check_condition(const asio::error_code& ec, Iterator& iter, Iterator& end) { iter = detail::call_connect_condition(connect_condition_, ec, iter, end); } private: ConnectCondition connect_condition_; }; // The default_connect_condition implementation is essentially a no-op. This // template specialisation lets us eliminate all costs associated with it. 
template <> class base_from_connect_condition { protected: explicit base_from_connect_condition(const default_connect_condition&) { } template void check_condition(const asio::error_code&, Iterator&, Iterator&) { } }; template class range_connect_op : base_from_connect_condition { public: range_connect_op(basic_socket& sock, const EndpointSequence& endpoints, const ConnectCondition& connect_condition, RangeConnectHandler& handler) : base_from_connect_condition(connect_condition), socket_(sock), endpoints_(endpoints), index_(0), start_(0), handler_(ASIO_MOVE_CAST(RangeConnectHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) range_connect_op(const range_connect_op& other) : base_from_connect_condition(other), socket_(other.socket_), endpoints_(other.endpoints_), index_(other.index_), start_(other.start_), handler_(other.handler_) { } range_connect_op(range_connect_op&& other) : base_from_connect_condition(other), socket_(other.socket_), endpoints_(other.endpoints_), index_(other.index_), start_(other.start_), handler_(ASIO_MOVE_CAST(RangeConnectHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(asio::error_code ec, int start = 0) { this->process(ec, start, const_cast(endpoints_).begin(), const_cast(endpoints_).end()); } //private: template void process(asio::error_code ec, int start, Iterator begin, Iterator end) { Iterator iter = begin; std::advance(iter, index_); switch (start_ = start) { case 1: for (;;) { this->check_condition(ec, iter, end); index_ = std::distance(begin, iter); if (iter != end) { socket_.close(ec); socket_.async_connect(*iter, ASIO_MOVE_CAST(range_connect_op)(*this)); return; } if (start) { ec = asio::error::not_found; asio::post(socket_.get_executor(), detail::bind_handler( ASIO_MOVE_CAST(range_connect_op)(*this), ec)); return; } /* fall-through */ default: if (iter == end) break; if (!socket_.is_open()) { ec = asio::error::operation_aborted; break; } if (!ec) break; ++iter; ++index_; } handler_(static_cast(ec), 
static_cast( ec || iter == end ? typename Protocol::endpoint() : *iter)); } } basic_socket& socket_; EndpointSequence endpoints_; std::size_t index_; int start_; RangeConnectHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, range_connect_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, range_connect_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( range_connect_op* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, range_connect_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, range_connect_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_range_connect { template void operator()(ASIO_MOVE_ARG(RangeConnectHandler) handler, basic_socket* s, const EndpointSequence& endpoints, const ConnectCondition& connect_condition) const { // If you get an error on the following line it means that your // handler does not meet the documented type requirements for an // RangeConnectHandler. 
ASIO_RANGE_CONNECT_HANDLER_CHECK(RangeConnectHandler, handler, typename Protocol::endpoint) type_check; non_const_lvalue handler2(handler); range_connect_op::type>(*s, endpoints, connect_condition, handler2.value)(asio::error_code(), 1); } }; template class iterator_connect_op : base_from_connect_condition { public: iterator_connect_op(basic_socket& sock, const Iterator& begin, const Iterator& end, const ConnectCondition& connect_condition, IteratorConnectHandler& handler) : base_from_connect_condition(connect_condition), socket_(sock), iter_(begin), end_(end), start_(0), handler_(ASIO_MOVE_CAST(IteratorConnectHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) iterator_connect_op(const iterator_connect_op& other) : base_from_connect_condition(other), socket_(other.socket_), iter_(other.iter_), end_(other.end_), start_(other.start_), handler_(other.handler_) { } iterator_connect_op(iterator_connect_op&& other) : base_from_connect_condition(other), socket_(other.socket_), iter_(other.iter_), end_(other.end_), start_(other.start_), handler_(ASIO_MOVE_CAST(IteratorConnectHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(asio::error_code ec, int start = 0) { switch (start_ = start) { case 1: for (;;) { this->check_condition(ec, iter_, end_); if (iter_ != end_) { socket_.close(ec); socket_.async_connect(*iter_, ASIO_MOVE_CAST(iterator_connect_op)(*this)); return; } if (start) { ec = asio::error::not_found; asio::post(socket_.get_executor(), detail::bind_handler( ASIO_MOVE_CAST(iterator_connect_op)(*this), ec)); return; } /* fall-through */ default: if (iter_ == end_) break; if (!socket_.is_open()) { ec = asio::error::operation_aborted; break; } if (!ec) break; ++iter_; } handler_(static_cast(ec), static_cast(iter_)); } } //private: basic_socket& socket_; Iterator iter_; Iterator end_; int start_; IteratorConnectHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, iterator_connect_op* this_handler) { return 
asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, iterator_connect_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( iterator_connect_op* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, iterator_connect_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, iterator_connect_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_iterator_connect { template void operator()(ASIO_MOVE_ARG(IteratorConnectHandler) handler, basic_socket* s, Iterator begin, Iterator end, const ConnectCondition& connect_condition) const { // If you get an error on the following line it means that your // handler does not meet the documented type requirements for an // IteratorConnectHandler. 
ASIO_ITERATOR_CONNECT_HANDLER_CHECK( IteratorConnectHandler, handler, Iterator) type_check; non_const_lvalue handler2(handler); iterator_connect_op::type>(*s, begin, end, connect_condition, handler2.value)(asio::error_code(), 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::range_connect_op, Allocator> { typedef typename associated_allocator< RangeConnectHandler, Allocator>::type type; static type get( const detail::range_connect_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::range_connect_op, Executor1> { typedef typename associated_executor< RangeConnectHandler, Executor1>::type type; static type get( const detail::range_connect_op& h, const Executor1& ex = Executor1()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; template struct associated_allocator< detail::iterator_connect_op, Allocator> { typedef typename associated_allocator< IteratorConnectHandler, Allocator>::type type; static type get( const detail::iterator_connect_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::iterator_connect_op, Executor1> { typedef typename associated_executor< IteratorConnectHandler, Executor1>::type type; static type get( const detail::iterator_connect_op& h, const Executor1& ex = Executor1()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(RangeConnectHandler, void (asio::error_code, typename Protocol::endpoint)) async_connect(basic_socket& s, const EndpointSequence& endpoints, ASIO_MOVE_ARG(RangeConnectHandler) handler, typename enable_if::value>::type*) { return async_initiate( detail::initiate_async_range_connect(), handler, &s, endpoints, 
detail::default_connect_condition()); } #if !defined(ASIO_NO_DEPRECATED) template inline ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ASIO_MOVE_ARG(IteratorConnectHandler) handler, typename enable_if::value>::type*) { return async_initiate( detail::initiate_async_iterator_connect(), handler, &s, begin, Iterator(), detail::default_connect_condition()); } #endif // !defined(ASIO_NO_DEPRECATED) template inline ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ASIO_MOVE_ARG(IteratorConnectHandler) handler) { return async_initiate( detail::initiate_async_iterator_connect(), handler, &s, begin, end, detail::default_connect_condition()); } template inline ASIO_INITFN_RESULT_TYPE(RangeConnectHandler, void (asio::error_code, typename Protocol::endpoint)) async_connect(basic_socket& s, const EndpointSequence& endpoints, ConnectCondition connect_condition, ASIO_MOVE_ARG(RangeConnectHandler) handler, typename enable_if::value>::type*) { return async_initiate( detail::initiate_async_range_connect(), handler, &s, endpoints, connect_condition); } #if !defined(ASIO_NO_DEPRECATED) template inline ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, ConnectCondition connect_condition, ASIO_MOVE_ARG(IteratorConnectHandler) handler, typename enable_if::value>::type*) { return async_initiate( detail::initiate_async_iterator_connect(), handler, &s, begin, Iterator(), connect_condition); } #endif // !defined(ASIO_NO_DEPRECATED) template inline ASIO_INITFN_RESULT_TYPE(IteratorConnectHandler, void (asio::error_code, Iterator)) async_connect(basic_socket& s, Iterator begin, Iterator end, ConnectCondition connect_condition, ASIO_MOVE_ARG(IteratorConnectHandler) handler) { return async_initiate( detail::initiate_async_iterator_connect(), 
handler, &s, begin, end, connect_condition); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_CONNECT_HPP galera-4-26.4.25/asio/asio/impl/buffered_read_stream.hpp000644 000164 177776 00000032701 15107057155 024207 0ustar00jenkinsnogroup000000 000000 // // impl/buffered_read_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_BUFFERED_READ_STREAM_HPP #define ASIO_IMPL_BUFFERED_READ_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/push_options.hpp" namespace asio { template std::size_t buffered_read_stream::fill() { detail::buffer_resize_guard resize_guard(storage_); std::size_t previous_size = storage_.size(); storage_.resize(storage_.capacity()); storage_.resize(previous_size + next_layer_.read_some(buffer( storage_.data() + previous_size, storage_.size() - previous_size))); resize_guard.commit(); return storage_.size() - previous_size; } template std::size_t buffered_read_stream::fill(asio::error_code& ec) { detail::buffer_resize_guard resize_guard(storage_); std::size_t previous_size = storage_.size(); storage_.resize(storage_.capacity()); storage_.resize(previous_size + next_layer_.read_some(buffer( storage_.data() + previous_size, storage_.size() - previous_size), ec)); resize_guard.commit(); return storage_.size() - previous_size; } namespace detail { template class 
buffered_fill_handler { public: buffered_fill_handler(detail::buffered_stream_storage& storage, std::size_t previous_size, ReadHandler& handler) : storage_(storage), previous_size_(previous_size), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) buffered_fill_handler(const buffered_fill_handler& other) : storage_(other.storage_), previous_size_(other.previous_size_), handler_(other.handler_) { } buffered_fill_handler(buffered_fill_handler&& other) : storage_(other.storage_), previous_size_(other.previous_size_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, const std::size_t bytes_transferred) { storage_.resize(previous_size_ + bytes_transferred); handler_(ec, bytes_transferred); } //private: detail::buffered_stream_storage& storage_; std::size_t previous_size_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, buffered_fill_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, buffered_fill_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( buffered_fill_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, buffered_fill_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, buffered_fill_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_buffered_fill { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, buffered_stream_storage* storage, Stream* next_layer) const { // If you get an error 
on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); std::size_t previous_size = storage->size(); storage->resize(storage->capacity()); next_layer->async_read_some( buffer( storage->data() + previous_size, storage->size() - previous_size), buffered_fill_handler::type>( *storage, previous_size, handler2.value)); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::buffered_fill_handler, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::buffered_fill_handler& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::buffered_fill_handler, Executor> { typedef typename associated_executor::type type; static type get(const detail::buffered_fill_handler& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) buffered_read_stream::async_fill( ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( detail::initiate_async_buffered_fill(), handler, &storage_, &next_layer_); } template template std::size_t buffered_read_stream::read_some( const MutableBufferSequence& buffers) { using asio::buffer_size; if (buffer_size(buffers) == 0) return 0; if (storage_.empty()) this->fill(); return this->copy(buffers); } template template std::size_t buffered_read_stream::read_some( const MutableBufferSequence& buffers, asio::error_code& ec) { ec = asio::error_code(); using asio::buffer_size; if (buffer_size(buffers) == 0) return 0; if (storage_.empty() && !this->fill(ec)) return 0; return this->copy(buffers); } namespace detail { template class 
buffered_read_some_handler { public: buffered_read_some_handler(detail::buffered_stream_storage& storage, const MutableBufferSequence& buffers, ReadHandler& handler) : storage_(storage), buffers_(buffers), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) buffered_read_some_handler(const buffered_read_some_handler& other) : storage_(other.storage_), buffers_(other.buffers_), handler_(other.handler_) { } buffered_read_some_handler(buffered_read_some_handler&& other) : storage_(other.storage_), buffers_(other.buffers_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t) { if (ec || storage_.empty()) { const std::size_t length = 0; handler_(ec, length); } else { const std::size_t bytes_copied = asio::buffer_copy( buffers_, storage_.data(), storage_.size()); storage_.consume(bytes_copied); handler_(ec, bytes_copied); } } //private: detail::buffered_stream_storage& storage_; MutableBufferSequence buffers_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, buffered_read_some_handler< MutableBufferSequence, ReadHandler>* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, buffered_read_some_handler< MutableBufferSequence, ReadHandler>* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( buffered_read_some_handler< MutableBufferSequence, ReadHandler>* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, buffered_read_some_handler< MutableBufferSequence, ReadHandler>* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const 
Function& function, buffered_read_some_handler< MutableBufferSequence, ReadHandler>* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_buffered_read_some { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, buffered_stream_storage* storage, Stream* next_layer, const MutableBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; using asio::buffer_size; non_const_lvalue handler2(handler); if (buffer_size(buffers) == 0 || !storage->empty()) { next_layer->async_read_some(ASIO_MUTABLE_BUFFER(0, 0), buffered_read_some_handler::type>( *storage, buffers, handler2.value)); } else { initiate_async_buffered_fill()( buffered_read_some_handler::type>( *storage, buffers, handler2.value), storage, next_layer); } } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::buffered_read_some_handler, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::buffered_read_some_handler< MutableBufferSequence, ReadHandler>& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::buffered_read_some_handler, Executor> { typedef typename associated_executor::type type; static type get( const detail::buffered_read_some_handler< MutableBufferSequence, ReadHandler>& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) buffered_read_stream::async_read_some( const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( 
detail::initiate_async_buffered_read_some(), handler, &storage_, &next_layer_, buffers); } template template std::size_t buffered_read_stream::peek( const MutableBufferSequence& buffers) { if (storage_.empty()) this->fill(); return this->peek_copy(buffers); } template template std::size_t buffered_read_stream::peek( const MutableBufferSequence& buffers, asio::error_code& ec) { ec = asio::error_code(); if (storage_.empty() && !this->fill(ec)) return 0; return this->peek_copy(buffers); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_BUFFERED_READ_STREAM_HPP galera-4-26.4.25/asio/asio/impl/executor.ipp000644 000164 177776 00000001462 15107057155 021716 0ustar00jenkinsnogroup000000 000000 // // impl/executor.ipp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_EXECUTOR_IPP #define ASIO_IMPL_EXECUTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { bad_executor::bad_executor() ASIO_NOEXCEPT { } const char* bad_executor::what() const ASIO_NOEXCEPT_OR_NOTHROW { return "bad executor"; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_EXECUTOR_IPP galera-4-26.4.25/asio/asio/impl/thread_pool.ipp000644 000164 177776 00000003517 15107057155 022363 0ustar00jenkinsnogroup000000 000000 // // impl/thread_pool.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_THREAD_POOL_IPP #define ASIO_IMPL_THREAD_POOL_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/thread_pool.hpp" #include "asio/detail/push_options.hpp" namespace asio { struct thread_pool::thread_function { detail::scheduler* scheduler_; void operator()() { asio::error_code ec; scheduler_->run(ec); } }; thread_pool::thread_pool() : scheduler_(add_scheduler(new detail::scheduler(*this, 0, false))) { scheduler_.work_started(); thread_function f = { &scheduler_ }; std::size_t num_threads = detail::thread::hardware_concurrency() * 2; threads_.create_threads(f, num_threads ? num_threads : 2); } thread_pool::thread_pool(std::size_t num_threads) : scheduler_(add_scheduler(new detail::scheduler( *this, num_threads == 1 ? 1 : 0, false))) { scheduler_.work_started(); thread_function f = { &scheduler_ }; threads_.create_threads(f, num_threads); } thread_pool::~thread_pool() { stop(); join(); } void thread_pool::stop() { scheduler_.stop(); } void thread_pool::join() { if (!threads_.empty()) { scheduler_.work_finished(); threads_.join(); } } detail::scheduler& thread_pool::add_scheduler(detail::scheduler* s) { detail::scoped_ptr scoped_impl(s); asio::add_service(*this, scoped_impl.get()); return *scoped_impl.release(); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_THREAD_POOL_IPP galera-4-26.4.25/asio/asio/impl/system_executor.hpp000644 000164 177776 00000005010 15107057155 023312 0ustar00jenkinsnogroup000000 000000 // // impl/system_executor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SYSTEM_EXECUTOR_HPP #define ASIO_IMPL_SYSTEM_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/executor_op.hpp" #include "asio/detail/global.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/detail/type_traits.hpp" #include "asio/system_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { inline system_context& system_executor::context() const ASIO_NOEXCEPT { return detail::global(); } template void system_executor::dispatch( ASIO_MOVE_ARG(Function) f, const Allocator&) const { typename decay::type tmp(ASIO_MOVE_CAST(Function)(f)); asio_handler_invoke_helpers::invoke(tmp, tmp); } template void system_executor::post( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; system_context& ctx = detail::global(); // Allocate and construct an operation to wrap the function. typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((ctx, *p.p, "system_executor", &this->context(), 0, "post")); ctx.scheduler_.post_immediate_completion(p.p, false); p.v = p.p = 0; } template void system_executor::defer( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; system_context& ctx = detail::global(); // Allocate and construct an operation to wrap the function. 
typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((ctx, *p.p, "system_executor", &this->context(), 0, "defer")); ctx.scheduler_.post_immediate_completion(p.p, true); p.v = p.p = 0; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_SYSTEM_EXECUTOR_HPP galera-4-26.4.25/asio/asio/impl/io_context.hpp000644 000164 177776 00000023446 15107057155 022240 0ustar00jenkinsnogroup000000 000000 // // impl/io_context.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_IO_CONTEXT_HPP #define ASIO_IMPL_IO_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/completion_handler.hpp" #include "asio/detail/executor_op.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/detail/service_registry.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" #if !defined(GENERATING_DOCUMENTATION) namespace asio { template inline Service& use_service(io_context& ioc) { // Check that Service meets the necessary type requirements. 
(void)static_cast(static_cast(0)); (void)static_cast(&Service::id); return ioc.service_registry_->template use_service(ioc); } template <> inline detail::io_context_impl& use_service( io_context& ioc) { return ioc.impl_; } } // namespace asio #endif // !defined(GENERATING_DOCUMENTATION) #include "asio/detail/pop_options.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else # include "asio/detail/scheduler.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { inline io_context::executor_type io_context::get_executor() ASIO_NOEXCEPT { return executor_type(*this); } #if defined(ASIO_HAS_CHRONO) template std::size_t io_context::run_for( const chrono::duration& rel_time) { return this->run_until(chrono::steady_clock::now() + rel_time); } template std::size_t io_context::run_until( const chrono::time_point& abs_time) { std::size_t n = 0; while (this->run_one_until(abs_time)) if (n != (std::numeric_limits::max)()) ++n; return n; } template std::size_t io_context::run_one_for( const chrono::duration& rel_time) { return this->run_one_until(chrono::steady_clock::now() + rel_time); } template std::size_t io_context::run_one_until( const chrono::time_point& abs_time) { typename Clock::time_point now = Clock::now(); while (now < abs_time) { typename Clock::duration rel_time = abs_time - now; if (rel_time > chrono::seconds(1)) rel_time = chrono::seconds(1); asio::error_code ec; std::size_t s = impl_.wait_one( static_cast(chrono::duration_cast< chrono::microseconds>(rel_time).count()), ec); asio::detail::throw_error(ec); if (s || impl_.stopped()) return s; now = Clock::now(); } return 0; } #endif // defined(ASIO_HAS_CHRONO) #if !defined(ASIO_NO_DEPRECATED) inline void io_context::reset() { restart(); } struct io_context::initiate_dispatch { template void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler, io_context* self) const { // If you get an error on the following line it means that your handler does // not meet the 
documented type requirements for a LegacyCompletionHandler. ASIO_LEGACY_COMPLETION_HANDLER_CHECK( LegacyCompletionHandler, handler) type_check; detail::non_const_lvalue handler2(handler); if (self->impl_.can_dispatch()) { detail::fenced_block b(detail::fenced_block::full); asio_handler_invoke_helpers::invoke( handler2.value, handler2.value); } else { // Allocate and construct an operation to wrap the handler. typedef detail::completion_handler< typename decay::type> op; typename op::ptr p = { detail::addressof(handler2.value), op::ptr::allocate(handler2.value), 0 }; p.p = new (p.v) op(handler2.value); ASIO_HANDLER_CREATION((*self, *p.p, "io_context", self, 0, "dispatch")); self->impl_.do_dispatch(p.p); p.v = p.p = 0; } } }; template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) io_context::dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler) { return async_initiate( initiate_dispatch(), handler, this); } struct io_context::initiate_post { template void operator()(ASIO_MOVE_ARG(LegacyCompletionHandler) handler, io_context* self) const { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a LegacyCompletionHandler. ASIO_LEGACY_COMPLETION_HANDLER_CHECK( LegacyCompletionHandler, handler) type_check; detail::non_const_lvalue handler2(handler); bool is_continuation = asio_handler_cont_helpers::is_continuation(handler2.value); // Allocate and construct an operation to wrap the handler. 
typedef detail::completion_handler< typename decay::type> op; typename op::ptr p = { detail::addressof(handler2.value), op::ptr::allocate(handler2.value), 0 }; p.p = new (p.v) op(handler2.value); ASIO_HANDLER_CREATION((*self, *p.p, "io_context", self, 0, "post")); self->impl_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } }; template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) io_context::post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler) { return async_initiate( initiate_post(), handler, this); } template #if defined(GENERATING_DOCUMENTATION) unspecified #else inline detail::wrapped_handler #endif io_context::wrap(Handler handler) { return detail::wrapped_handler(*this, handler); } #endif // !defined(ASIO_NO_DEPRECATED) inline io_context& io_context::executor_type::context() const ASIO_NOEXCEPT { return io_context_; } inline void io_context::executor_type::on_work_started() const ASIO_NOEXCEPT { io_context_.impl_.work_started(); } inline void io_context::executor_type::on_work_finished() const ASIO_NOEXCEPT { io_context_.impl_.work_finished(); } template void io_context::executor_type::dispatch( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Invoke immediately if we are already inside the thread pool. if (io_context_.impl_.can_dispatch()) { // Make a local, non-const copy of the function. function_type tmp(ASIO_MOVE_CAST(Function)(f)); detail::fenced_block b(detail::fenced_block::full); asio_handler_invoke_helpers::invoke(tmp, tmp); return; } // Allocate and construct an operation to wrap the function. 
typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((this->context(), *p.p, "io_context", &this->context(), 0, "dispatch")); io_context_.impl_.post_immediate_completion(p.p, false); p.v = p.p = 0; } template void io_context::executor_type::post( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((this->context(), *p.p, "io_context", &this->context(), 0, "post")); io_context_.impl_.post_immediate_completion(p.p, false); p.v = p.p = 0; } template void io_context::executor_type::defer( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. 
typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((this->context(), *p.p, "io_context", &this->context(), 0, "defer")); io_context_.impl_.post_immediate_completion(p.p, true); p.v = p.p = 0; } inline bool io_context::executor_type::running_in_this_thread() const ASIO_NOEXCEPT { return io_context_.impl_.can_dispatch(); } #if !defined(ASIO_NO_DEPRECATED) inline io_context::work::work(asio::io_context& io_context) : io_context_impl_(io_context.impl_) { io_context_impl_.work_started(); } inline io_context::work::work(const work& other) : io_context_impl_(other.io_context_impl_) { io_context_impl_.work_started(); } inline io_context::work::~work() { io_context_impl_.work_finished(); } inline asio::io_context& io_context::work::get_io_context() { return static_cast(io_context_impl_.context()); } #endif // !defined(ASIO_NO_DEPRECATED) inline asio::io_context& io_context::service::get_io_context() { return static_cast(context()); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_IO_CONTEXT_HPP galera-4-26.4.25/asio/asio/impl/serial_port_base.hpp000644 000164 177776 00000002432 15107057155 023372 0ustar00jenkinsnogroup000000 000000 // // impl/serial_port_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SERIAL_PORT_BASE_HPP #define ASIO_IMPL_SERIAL_PORT_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { inline serial_port_base::baud_rate::baud_rate(unsigned int rate) : value_(rate) { } inline unsigned int serial_port_base::baud_rate::value() const { return value_; } inline serial_port_base::flow_control::type serial_port_base::flow_control::value() const { return value_; } inline serial_port_base::parity::type serial_port_base::parity::value() const { return value_; } inline serial_port_base::stop_bits::type serial_port_base::stop_bits::value() const { return value_; } inline unsigned int serial_port_base::character_size::value() const { return value_; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_SERIAL_PORT_BASE_HPP galera-4-26.4.25/asio/asio/impl/system_context.ipp000644 000164 177776 00000003316 15107057155 023150 0ustar00jenkinsnogroup000000 000000 // // impl/system_context.ipp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SYSTEM_CONTEXT_IPP #define ASIO_IMPL_SYSTEM_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/system_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { struct system_context::thread_function { detail::scheduler* scheduler_; void operator()() { asio::error_code ec; scheduler_->run(ec); } }; system_context::system_context() : scheduler_(add_scheduler(new detail::scheduler(*this, 0, false))) { scheduler_.work_started(); thread_function f = { &scheduler_ }; std::size_t num_threads = detail::thread::hardware_concurrency() * 2; threads_.create_threads(f, num_threads ? num_threads : 2); } system_context::~system_context() { scheduler_.work_finished(); scheduler_.stop(); threads_.join(); } void system_context::stop() { scheduler_.stop(); } bool system_context::stopped() const ASIO_NOEXCEPT { return scheduler_.stopped(); } void system_context::join() { scheduler_.work_finished(); threads_.join(); } detail::scheduler& system_context::add_scheduler(detail::scheduler* s) { detail::scoped_ptr scoped_impl(s); asio::add_service(*this, scoped_impl.get()); return *scoped_impl.release(); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_SYSTEM_CONTEXT_IPP galera-4-26.4.25/asio/asio/impl/use_future.hpp000644 000164 177776 00000053764 15107057155 022261 0ustar00jenkinsnogroup000000 000000 // // impl/use_future.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_USE_FUTURE_HPP #define ASIO_IMPL_USE_FUTURE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/memory.hpp" #include "asio/error_code.hpp" #include "asio/packaged_task.hpp" #include "asio/system_error.hpp" #include "asio/system_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template inline void promise_invoke_and_set(std::promise& p, F& f, ASIO_MOVE_ARG(Args)... args) { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { p.set_value(f(ASIO_MOVE_CAST(Args)(args)...)); } #if !defined(ASIO_NO_EXCEPTIONS) catch (...) { p.set_exception(std::current_exception()); } #endif // !defined(ASIO_NO_EXCEPTIONS) } template inline void promise_invoke_and_set(std::promise& p, F& f, ASIO_MOVE_ARG(Args)... args) { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { f(ASIO_MOVE_CAST(Args)(args)...); p.set_value(); } #if !defined(ASIO_NO_EXCEPTIONS) catch (...) { p.set_exception(std::current_exception()); } #endif // !defined(ASIO_NO_EXCEPTIONS) } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template inline void promise_invoke_and_set(std::promise& p, F& f) { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { p.set_value(f()); } #if !defined(ASIO_NO_EXCEPTIONS) catch (...) { p.set_exception(std::current_exception()); } #endif // !defined(ASIO_NO_EXCEPTIONS) } template inline void promise_invoke_and_set(std::promise& p, F& f) { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { f(); p.set_value(); #if !defined(ASIO_NO_EXCEPTIONS) } catch (...) 
{ p.set_exception(std::current_exception()); } #endif // !defined(ASIO_NO_EXCEPTIONS) } #if defined(ASIO_NO_EXCEPTIONS) #define ASIO_PRIVATE_PROMISE_INVOKE_DEF(n) \ template \ inline void promise_invoke_and_set(std::promise& p, \ F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ p.set_value(f(ASIO_VARIADIC_MOVE_ARGS(n))); \ } \ \ template \ inline void promise_invoke_and_set(std::promise& p, \ F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ f(ASIO_VARIADIC_MOVE_ARGS(n)); \ p.set_value(); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_INVOKE_DEF) #undef ASIO_PRIVATE_PROMISE_INVOKE_DEF #else // defined(ASIO_NO_EXCEPTIONS) #define ASIO_PRIVATE_PROMISE_INVOKE_DEF(n) \ template \ inline void promise_invoke_and_set(std::promise& p, \ F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ try \ { \ p.set_value(f(ASIO_VARIADIC_MOVE_ARGS(n))); \ } \ catch (...) \ { \ p.set_exception(std::current_exception()); \ } \ } \ \ template \ inline void promise_invoke_and_set(std::promise& p, \ F& f, ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ try \ { \ f(ASIO_VARIADIC_MOVE_ARGS(n)); \ p.set_value(); \ } \ catch (...) \ { \ p.set_exception(std::current_exception()); \ } \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_INVOKE_DEF) #undef ASIO_PRIVATE_PROMISE_INVOKE_DEF #endif // defined(ASIO_NO_EXCEPTIONS) #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) // A function object adapter to invoke a nullary function object and capture // any exception thrown into a promise. template class promise_invoker { public: promise_invoker(const shared_ptr >& p, ASIO_MOVE_ARG(F) f) : p_(p), f_(ASIO_MOVE_CAST(F)(f)) { } void operator()() { #if !defined(ASIO_NO_EXCEPTIONS) try #endif // !defined(ASIO_NO_EXCEPTIONS) { f_(); } #if !defined(ASIO_NO_EXCEPTIONS) catch (...) 
{ p_->set_exception(std::current_exception()); } #endif // !defined(ASIO_NO_EXCEPTIONS) } private: shared_ptr > p_; typename decay::type f_; }; // An executor that adapts the system_executor to capture any exeption thrown // by a submitted function object and save it into a promise. template class promise_executor { public: explicit promise_executor(const shared_ptr >& p) : p_(p) { } execution_context& context() const ASIO_NOEXCEPT { return system_executor().context(); } void on_work_started() const ASIO_NOEXCEPT {} void on_work_finished() const ASIO_NOEXCEPT {} template void dispatch(ASIO_MOVE_ARG(F) f, const A&) const { promise_invoker(p_, ASIO_MOVE_CAST(F)(f))(); } template void post(ASIO_MOVE_ARG(F) f, const A& a) const { system_executor().post( promise_invoker(p_, ASIO_MOVE_CAST(F)(f)), a); } template void defer(ASIO_MOVE_ARG(F) f, const A& a) const { system_executor().defer( promise_invoker(p_, ASIO_MOVE_CAST(F)(f)), a); } friend bool operator==(const promise_executor& a, const promise_executor& b) ASIO_NOEXCEPT { return a.p_ == b.p_; } friend bool operator!=(const promise_executor& a, const promise_executor& b) ASIO_NOEXCEPT { return a.p_ != b.p_; } private: shared_ptr > p_; }; // The base class for all completion handlers that create promises. template class promise_creator { public: typedef promise_executor executor_type; executor_type get_executor() const ASIO_NOEXCEPT { return executor_type(p_); } typedef std::future future_type; future_type get_future() { return p_->get_future(); } protected: template void create_promise(const Allocator& a) { ASIO_REBIND_ALLOC(Allocator, char) b(a); p_ = std::allocate_shared>(b, std::allocator_arg, b); } shared_ptr > p_; }; // For completion signature void(). class promise_handler_0 : public promise_creator { public: void operator()() { this->p_->set_value(); } }; // For completion signature void(error_code). 
class promise_handler_ec_0 : public promise_creator { public: void operator()(const asio::error_code& ec) { if (ec) { this->p_->set_exception( std::make_exception_ptr( asio::system_error(ec))); } else { this->p_->set_value(); } } }; // For completion signature void(exception_ptr). class promise_handler_ex_0 : public promise_creator { public: void operator()(const std::exception_ptr& ex) { if (ex) { this->p_->set_exception(ex); } else { this->p_->set_value(); } } }; // For completion signature void(T). template class promise_handler_1 : public promise_creator { public: template void operator()(ASIO_MOVE_ARG(Arg) arg) { this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg)); } }; // For completion signature void(error_code, T). template class promise_handler_ec_1 : public promise_creator { public: template void operator()(const asio::error_code& ec, ASIO_MOVE_ARG(Arg) arg) { if (ec) { this->p_->set_exception( std::make_exception_ptr( asio::system_error(ec))); } else this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg)); } }; // For completion signature void(exception_ptr, T). template class promise_handler_ex_1 : public promise_creator { public: template void operator()(const std::exception_ptr& ex, ASIO_MOVE_ARG(Arg) arg) { if (ex) this->p_->set_exception(ex); else this->p_->set_value(ASIO_MOVE_CAST(Arg)(arg)); } }; // For completion signature void(T1, ..., Tn); template class promise_handler_n : public promise_creator { public: #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(ASIO_MOVE_ARG(Args)... 
args) { this->p_->set_value( std::forward_as_tuple( ASIO_MOVE_CAST(Args)(args)...)); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) #define ASIO_PRIVATE_CALL_OP_DEF(n) \ template \ void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \ {\ this->p_->set_value( \ std::forward_as_tuple( \ ASIO_VARIADIC_MOVE_ARGS(n))); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF) #undef ASIO_PRIVATE_CALL_OP_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; // For completion signature void(error_code, T1, ..., Tn); template class promise_handler_ec_n : public promise_creator { public: #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(const asio::error_code& ec, ASIO_MOVE_ARG(Args)... args) { if (ec) { this->p_->set_exception( std::make_exception_ptr( asio::system_error(ec))); } else { this->p_->set_value( std::forward_as_tuple( ASIO_MOVE_CAST(Args)(args)...)); } } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) #define ASIO_PRIVATE_CALL_OP_DEF(n) \ template \ void operator()(const asio::error_code& ec, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ {\ if (ec) \ { \ this->p_->set_exception( \ std::make_exception_ptr( \ asio::system_error(ec))); \ } \ else \ { \ this->p_->set_value( \ std::forward_as_tuple( \ ASIO_VARIADIC_MOVE_ARGS(n))); \ } \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF) #undef ASIO_PRIVATE_CALL_OP_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; // For completion signature void(exception_ptr, T1, ..., Tn); template class promise_handler_ex_n : public promise_creator { public: #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(const std::exception_ptr& ex, ASIO_MOVE_ARG(Args)... 
args) { if (ex) this->p_->set_exception(ex); else { this->p_->set_value( std::forward_as_tuple( ASIO_MOVE_CAST(Args)(args)...)); } } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) #define ASIO_PRIVATE_CALL_OP_DEF(n) \ template \ void operator()(const std::exception_ptr& ex, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ {\ if (ex) \ this->p_->set_exception(ex); \ else \ { \ this->p_->set_value( \ std::forward_as_tuple( \ ASIO_VARIADIC_MOVE_ARGS(n))); \ } \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF) #undef ASIO_PRIVATE_CALL_OP_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; // Helper template to choose the appropriate concrete promise handler // implementation based on the supplied completion signature. template class promise_handler_selector; template <> class promise_handler_selector : public promise_handler_0 {}; template <> class promise_handler_selector : public promise_handler_ec_0 {}; template <> class promise_handler_selector : public promise_handler_ex_0 {}; template class promise_handler_selector : public promise_handler_1 {}; template class promise_handler_selector : public promise_handler_ec_1 {}; template class promise_handler_selector : public promise_handler_ex_1 {}; #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template class promise_handler_selector : public promise_handler_n > {}; template class promise_handler_selector : public promise_handler_ec_n > {}; template class promise_handler_selector : public promise_handler_ex_n > {}; #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) #define ASIO_PRIVATE_PROMISE_SELECTOR_DEF(n) \ template \ class promise_handler_selector< \ void(Arg, ASIO_VARIADIC_TARGS(n))> \ : public promise_handler_n< \ std::tuple > {}; \ \ template \ class promise_handler_selector< \ void(asio::error_code, Arg, ASIO_VARIADIC_TARGS(n))> \ : public promise_handler_ec_n< \ std::tuple > {}; \ \ template \ class promise_handler_selector< \ void(std::exception_ptr, Arg, ASIO_VARIADIC_TARGS(n))> \ : public promise_handler_ex_n< \ std::tuple 
> {}; \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_PROMISE_SELECTOR_DEF) #undef ASIO_PRIVATE_PROMISE_SELECTOR_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) // Completion handlers produced from the use_future completion token, when not // using use_future::operator(). template class promise_handler : public promise_handler_selector { public: typedef Allocator allocator_type; typedef void result_type; promise_handler(use_future_t u) : allocator_(u.get_allocator()) { this->create_promise(allocator_); } allocator_type get_allocator() const ASIO_NOEXCEPT { return allocator_; } private: Allocator allocator_; }; template inline void asio_handler_invoke(Function& f, promise_handler* h) { typename promise_handler::executor_type ex(h->get_executor()); ex.dispatch(ASIO_MOVE_CAST(Function)(f), std::allocator()); } template inline void asio_handler_invoke(const Function& f, promise_handler* h) { typename promise_handler::executor_type ex(h->get_executor()); ex.dispatch(f, std::allocator()); } // Helper base class for async_result specialisation. template class promise_async_result { public: typedef promise_handler completion_handler_type; typedef typename completion_handler_type::future_type return_type; explicit promise_async_result(completion_handler_type& h) : future_(h.get_future()) { } return_type get() { return ASIO_MOVE_CAST(return_type)(future_); } private: return_type future_; }; // Return value from use_future::operator(). template class packaged_token { public: packaged_token(Function f, const Allocator& a) : function_(ASIO_MOVE_CAST(Function)(f)), allocator_(a) { } //private: Function function_; Allocator allocator_; }; // Completion handlers produced from the use_future completion token, when // using use_future::operator(). 
template class packaged_handler : public promise_creator { public: typedef Allocator allocator_type; typedef void result_type; packaged_handler(packaged_token t) : function_(ASIO_MOVE_CAST(Function)(t.function_)), allocator_(t.allocator_) { this->create_promise(allocator_); } allocator_type get_allocator() const ASIO_NOEXCEPT { return allocator_; } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(ASIO_MOVE_ARG(Args)... args) { (promise_invoke_and_set)(*this->p_, function_, ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) void operator()() { (promise_invoke_and_set)(*this->p_, function_); } #define ASIO_PRIVATE_CALL_OP_DEF(n) \ template \ void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \ {\ (promise_invoke_and_set)(*this->p_, \ function_, ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CALL_OP_DEF) #undef ASIO_PRIVATE_CALL_OP_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) private: Function function_; Allocator allocator_; }; template inline void asio_handler_invoke(Function& f, packaged_handler* h) { typename packaged_handler::executor_type ex(h->get_executor()); ex.dispatch(ASIO_MOVE_CAST(Function)(f), std::allocator()); } template inline void asio_handler_invoke(const Function& f, packaged_handler* h) { typename packaged_handler::executor_type ex(h->get_executor()); ex.dispatch(f, std::allocator()); } // Helper base class for async_result specialisation. 
template class packaged_async_result { public: typedef packaged_handler completion_handler_type; typedef typename completion_handler_type::future_type return_type; explicit packaged_async_result(completion_handler_type& h) : future_(h.get_future()) { } return_type get() { return ASIO_MOVE_CAST(return_type)(future_); } private: return_type future_; }; } // namespace detail template template inline detail::packaged_token::type, Allocator> use_future_t::operator()(ASIO_MOVE_ARG(Function) f) const { return detail::packaged_token::type, Allocator>( ASIO_MOVE_CAST(Function)(f), allocator_); } #if !defined(GENERATING_DOCUMENTATION) #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template class async_result, Result(Args...)> : public detail::promise_async_result< void(typename decay::type...), Allocator> { public: explicit async_result( typename detail::promise_async_result::type...), Allocator>::completion_handler_type& h) : detail::promise_async_result< void(typename decay::type...), Allocator>(h) { } }; template class async_result, Result(Args...)> : public detail::packaged_async_result::type> { public: explicit async_result( typename detail::packaged_async_result::type>::completion_handler_type& h) : detail::packaged_async_result::type>(h) { } }; #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template class async_result, Result()> : public detail::promise_async_result { public: explicit async_result( typename detail::promise_async_result< void(), Allocator>::completion_handler_type& h) : detail::promise_async_result(h) { } }; template class async_result, Result()> : public detail::packaged_async_result::type> { public: explicit async_result( typename detail::packaged_async_result::type>::completion_handler_type& h) : detail::packaged_async_result::type>(h) { } }; #define ASIO_PRIVATE_ASYNC_RESULT_DEF(n) \ template \ class async_result, \ Result(ASIO_VARIADIC_TARGS(n))> \ : public detail::promise_async_result< \ void(ASIO_VARIADIC_DECAY(n)), Allocator> \ { \ public: \ explicit 
async_result( \ typename detail::promise_async_result< \ void(ASIO_VARIADIC_DECAY(n)), \ Allocator>::completion_handler_type& h) \ : detail::promise_async_result< \ void(ASIO_VARIADIC_DECAY(n)), Allocator>(h) \ { \ } \ }; \ \ template \ class async_result, \ Result(ASIO_VARIADIC_TARGS(n))> \ : public detail::packaged_async_result::type> \ { \ public: \ explicit async_result( \ typename detail::packaged_async_result::type \ >::completion_handler_type& h) \ : detail::packaged_async_result::type>(h) \ { \ } \ }; \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_RESULT_DEF) #undef ASIO_PRIVATE_ASYNC_RESULT_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_USE_FUTURE_HPP galera-4-26.4.25/asio/asio/impl/io_context.ipp000644 000164 177776 00000007240 15107057155 022233 0ustar00jenkinsnogroup000000 000000 // // impl/io_context.ipp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_IO_CONTEXT_IPP #define ASIO_IMPL_IO_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_context.hpp" #include "asio/detail/concurrency_hint.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/service_registry.hpp" #include "asio/detail/throw_error.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else # include "asio/detail/scheduler.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { io_context::io_context() : impl_(add_impl(new impl_type(*this, ASIO_CONCURRENCY_HINT_DEFAULT, false))) { } io_context::io_context(int concurrency_hint) : impl_(add_impl(new impl_type(*this, concurrency_hint == 1 ? ASIO_CONCURRENCY_HINT_1 : concurrency_hint, false))) { } io_context::impl_type& io_context::add_impl(io_context::impl_type* impl) { asio::detail::scoped_ptr scoped_impl(impl); asio::add_service(*this, scoped_impl.get()); return *scoped_impl.release(); } io_context::~io_context() { } io_context::count_type io_context::run() { asio::error_code ec; count_type s = impl_.run(ec); asio::detail::throw_error(ec); return s; } #if !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::run(asio::error_code& ec) { return impl_.run(ec); } #endif // !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::run_one() { asio::error_code ec; count_type s = impl_.run_one(ec); asio::detail::throw_error(ec); return s; } #if !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::run_one(asio::error_code& ec) { return impl_.run_one(ec); } #endif // !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::poll() { asio::error_code ec; count_type s = impl_.poll(ec); asio::detail::throw_error(ec); return s; } #if !defined(ASIO_NO_DEPRECATED) 
io_context::count_type io_context::poll(asio::error_code& ec) { return impl_.poll(ec); } #endif // !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::poll_one() { asio::error_code ec; count_type s = impl_.poll_one(ec); asio::detail::throw_error(ec); return s; } #if !defined(ASIO_NO_DEPRECATED) io_context::count_type io_context::poll_one(asio::error_code& ec) { return impl_.poll_one(ec); } #endif // !defined(ASIO_NO_DEPRECATED) void io_context::stop() { impl_.stop(); } bool io_context::stopped() const { return impl_.stopped(); } void io_context::restart() { impl_.restart(); } io_context::service::service(asio::io_context& owner) : execution_context::service(owner) { } io_context::service::~service() { } void io_context::service::shutdown() { #if !defined(ASIO_NO_DEPRECATED) shutdown_service(); #endif // !defined(ASIO_NO_DEPRECATED) } #if !defined(ASIO_NO_DEPRECATED) void io_context::service::shutdown_service() { } #endif // !defined(ASIO_NO_DEPRECATED) void io_context::service::notify_fork(io_context::fork_event ev) { #if !defined(ASIO_NO_DEPRECATED) fork_service(ev); #else // !defined(ASIO_NO_DEPRECATED) (void)ev; #endif // !defined(ASIO_NO_DEPRECATED) } #if !defined(ASIO_NO_DEPRECATED) void io_context::service::fork_service(io_context::fork_event) { } #endif // !defined(ASIO_NO_DEPRECATED) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_IO_CONTEXT_IPP galera-4-26.4.25/asio/asio/impl/detached.hpp000644 000164 177776 00000006034 15107057155 021620 0ustar00jenkinsnogroup000000 000000 // // impl/detached.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_DETACHED_HPP #define ASIO_IMPL_DETACHED_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Class to adapt a detached_t as a completion handler. class detached_handler { public: typedef void result_type; detached_handler(detached_t) { } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(Args...) { } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) void operator()() { } #define ASIO_PRIVATE_DETACHED_DEF(n) \ template \ void operator()(ASIO_VARIADIC_TARGS(n)) \ { \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_DETACHED_DEF) #undef ASIO_PRIVATE_DETACHED_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct async_result { typedef asio::detail::detached_handler completion_handler_type; typedef void return_type; explicit async_result(completion_handler_type&) { } void get() { } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken), ASIO_MOVE_ARG(Args)... 
args) { ASIO_MOVE_CAST(Initiation)(initiation)( detail::detached_handler(detached_t()), ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken)) { ASIO_MOVE_CAST(Initiation)(initiation)( detail::detached_handler(detached_t())); } #define ASIO_PRIVATE_INITIATE_DEF(n) \ template \ static return_type initiate( \ ASIO_MOVE_ARG(Initiation) initiation, \ ASIO_MOVE_ARG(RawCompletionToken), \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ ASIO_MOVE_CAST(Initiation)(initiation)( \ detail::detached_handler(detached_t()), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF) #undef ASIO_PRIVATE_INITIATE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_DETACHED_HPP galera-4-26.4.25/asio/asio/impl/write.hpp000644 000164 177776 00000104702 15107057155 021212 0ustar00jenkinsnogroup000000 000000 // // impl/write.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_WRITE_HPP #define ASIO_IMPL_WRITE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/base_from_completion_cond.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/consuming_buffers.hpp" #include "asio/detail/dependent_type.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template std::size_t write_buffer_sequence(SyncWriteStream& s, const ConstBufferSequence& buffers, const ConstBufferIterator&, CompletionCondition completion_condition, asio::error_code& ec) { ec = asio::error_code(); asio::detail::consuming_buffers tmp(buffers); while (!tmp.empty()) { if (std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, tmp.total_consumed()))) tmp.consume(s.write_some(tmp.prepare(max_size), ec)); else break; } return tmp.total_consumed();; } } // namespace detail template inline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_const_buffer_sequence::value >::type*) { return detail::write_buffer_sequence(s, buffers, asio::buffer_sequence_begin(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, typename enable_if< 
is_const_buffer_sequence::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, buffers, transfer_all(), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } template inline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, asio::error_code& ec, typename enable_if< is_const_buffer_sequence::value >::type*) { return write(s, buffers, transfer_all(), ec); } template inline std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, typename enable_if< is_const_buffer_sequence::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) template std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); std::size_t bytes_transferred = write(s, b.data(), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); b.consume(bytes_transferred); return bytes_transferred; } template inline std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } template inline std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return write(s, 
ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ec); } template inline std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template inline std::size_t write(SyncWriteStream& s, asio::basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec) { return write(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t write(SyncWriteStream& s, asio::basic_streambuf& b) { return write(s, basic_streambuf_ref(b)); } template inline std::size_t write(SyncWriteStream& s, asio::basic_streambuf& b, asio::error_code& ec) { return write(s, basic_streambuf_ref(b), ec); } template inline std::size_t write(SyncWriteStream& s, asio::basic_streambuf& b, CompletionCondition completion_condition) { return write(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) template std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { std::size_t bytes_transferred = write(s, buffers.data(0, buffers.size()), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); buffers.consume(bytes_transferred); return bytes_transferred; } template inline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, typename enable_if< 
is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } template inline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return write(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ec); } template inline std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = write(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "write"); return bytes_transferred; } namespace detail { template class write_op : detail::base_from_completion_cond { public: write_op(AsyncWriteStream& stream, const ConstBufferSequence& buffers, CompletionCondition& completion_condition, WriteHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), stream_(stream), buffers_(buffers), start_(0), handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) write_op(const write_op& other) : detail::base_from_completion_cond(other), stream_(other.stream_), buffers_(other.buffers_), start_(other.start_), handler_(other.handler_) { } write_op(write_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), stream_(other.stream_), buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)), start_(other.start_), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size; switch (start_ = start) { case 1: 
max_size = this->check_for_completion(ec, buffers_.total_consumed()); do { stream_.async_write_some(buffers_.prepare(max_size), ASIO_MOVE_CAST(write_op)(*this)); return; default: buffers_.consume(bytes_transferred); if ((!ec && bytes_transferred == 0) || buffers_.empty()) break; max_size = this->check_for_completion(ec, buffers_.total_consumed()); } while (max_size > 0); handler_(ec, buffers_.total_consumed()); } } //private: typedef asio::detail::consuming_buffers buffers_type; AsyncWriteStream& stream_; buffers_type buffers_; int start_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, write_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, write_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( write_op* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, write_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, write_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void start_write_buffer_sequence_op(AsyncWriteStream& stream, const ConstBufferSequence& buffers, const ConstBufferIterator&, CompletionCondition& completion_condition, WriteHandler& handler) { detail::write_op( stream, buffers, completion_condition, handler)( asio::error_code(), 0, 1); } struct initiate_async_write_buffer_sequence { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, AsyncWriteStream* s, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); start_write_buffer_sequence_op(*s, buffers, asio::buffer_sequence_begin(buffers), completion_cond2.value, handler2.value); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::write_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::write_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::write_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::write_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_const_buffer_sequence::value >::type*) { return async_initiate( detail::initiate_async_write_buffer_sequence(), handler, &s, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_const_buffer_sequence::value >::type*) { return async_initiate( detail::initiate_async_write_buffer_sequence(), handler, &s, buffers, transfer_all()); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class write_dynbuf_v1_op { public: template write_dynbuf_v1_op(AsyncWriteStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, CompletionCondition& completion_condition, WriteHandler& 
handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), completion_condition_( ASIO_MOVE_CAST(CompletionCondition)(completion_condition)), handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) write_dynbuf_v1_op(const write_dynbuf_v1_op& other) : stream_(other.stream_), buffers_(other.buffers_), completion_condition_(other.completion_condition_), handler_(other.handler_) { } write_dynbuf_v1_op(write_dynbuf_v1_op&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), completion_condition_( ASIO_MOVE_CAST(CompletionCondition)( other.completion_condition_)), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { switch (start) { case 1: async_write(stream_, buffers_.data(), ASIO_MOVE_CAST(CompletionCondition)(completion_condition_), ASIO_MOVE_CAST(write_dynbuf_v1_op)(*this)); return; default: buffers_.consume(bytes_transferred); handler_(ec, static_cast(bytes_transferred)); } } //private: AsyncWriteStream& stream_; DynamicBuffer_v1 buffers_; CompletionCondition completion_condition_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, write_dynbuf_v1_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, write_dynbuf_v1_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( write_dynbuf_v1_op* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, write_dynbuf_v1_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const 
Function& function, write_dynbuf_v1_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_write_dynbuf_v1 { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, AsyncWriteStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); write_dynbuf_v1_op::type, CompletionCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), completion_cond2.value, handler2.value)( asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::write_dynbuf_v1_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::write_dynbuf_v1_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::write_dynbuf_v1_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::write_dynbuf_v1_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_write(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ASIO_MOVE_CAST(WriteHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, 
void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_initiate( detail::initiate_async_write_dynbuf_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, asio::basic_streambuf& b, ASIO_MOVE_ARG(WriteHandler) handler) { return async_write(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(WriteHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, asio::basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler) { return async_write(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ASIO_MOVE_CAST(WriteHandler)(handler)); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class write_dynbuf_v2_op { public: template write_dynbuf_v2_op(AsyncWriteStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, CompletionCondition& completion_condition, WriteHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), completion_condition_( ASIO_MOVE_CAST(CompletionCondition)(completion_condition)), handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) write_dynbuf_v2_op(const write_dynbuf_v2_op& other) : stream_(other.stream_), buffers_(other.buffers_), completion_condition_(other.completion_condition_), handler_(other.handler_) { } 
write_dynbuf_v2_op(write_dynbuf_v2_op&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), completion_condition_( ASIO_MOVE_CAST(CompletionCondition)( other.completion_condition_)), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { switch (start) { case 1: async_write(stream_, buffers_.data(0, buffers_.size()), ASIO_MOVE_CAST(CompletionCondition)(completion_condition_), ASIO_MOVE_CAST(write_dynbuf_v2_op)(*this)); return; default: buffers_.consume(bytes_transferred); handler_(ec, static_cast(bytes_transferred)); } } //private: AsyncWriteStream& stream_; DynamicBuffer_v2 buffers_; CompletionCondition completion_condition_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, write_dynbuf_v2_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, write_dynbuf_v2_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( write_dynbuf_v2_op* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, write_dynbuf_v2_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, write_dynbuf_v2_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_write_dynbuf_v2 { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, AsyncWriteStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that 
your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); write_dynbuf_v2_op::type, CompletionCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), completion_cond2.value, handler2.value)( asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::write_dynbuf_v2_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::write_dynbuf_v2_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::write_dynbuf_v2_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::write_dynbuf_v2_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return async_write(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ASIO_MOVE_CAST(WriteHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return async_initiate( detail::initiate_async_write_dynbuf_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } } // namespace asio #include "asio/detail/pop_options.hpp" 
#endif // ASIO_IMPL_WRITE_HPP galera-4-26.4.25/asio/asio/impl/read_until.hpp000644 000164 177776 00000305512 15107057155 022210 0ustar00jenkinsnogroup000000 000000 // // impl/read_until.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_READ_UNTIL_HPP #define ASIO_IMPL_READ_UNTIL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include #include #include #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/buffer.hpp" #include "asio/buffers_iterator.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Algorithm that finds a subsequence of equal values in a sequence. Returns // (iterator,true) if a full match was found, in which case the iterator // points to the beginning of the match. Returns (iterator,false) if a // partial match was found at the end of the first sequence, in which case // the iterator points to the beginning of the partial match. Returns // (last1,false) if no full or partial match was found. 
template std::pair partial_search( Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator2 last2) { for (Iterator1 iter1 = first1; iter1 != last1; ++iter1) { Iterator1 test_iter1 = iter1; Iterator2 test_iter2 = first2; for (;; ++test_iter1, ++test_iter2) { if (test_iter2 == last2) return std::make_pair(iter1, true); if (test_iter1 == last1) { if (test_iter2 != first2) return std::make_pair(iter1, false); else break; } if (*test_iter1 != *test_iter2) break; } } return std::make_pair(last1, false); } } // namespace detail #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) template inline std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = b.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. iterator iter = std::find(start_pos, end, delim); if (iter != end) { // Found a match. We're done. ec = asio::error_code(); return iter - begin + 1; } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. 
if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); b.commit(s.read_some(b.prepare(bytes_to_read), ec)); if (ec) return 0; } } template inline std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = b.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = detail::partial_search( start_pos, end, delim.begin(), delim.end()); if (result.first != end) { if (result.second) { // Full match. We're done. ec = asio::error_code(); return result.first - begin + delim.length(); } else { // Partial match. Next search needs to start from beginning of match. search_position = result.first - begin; } } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. 
std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); b.commit(s.read_some(b.prepare(bytes_to_read), ec)); if (ec) return 0; } } #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), expr, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = b.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. boost::match_results >::allocator_type> match_results; if (regex_search(start_pos, end, match_results, expr, boost::match_default | boost::match_partial)) { if (match_results[0].matched) { // Full match. We're done. ec = asio::error_code(); return match_results[0].second - begin; } else { // Partial match. Next search needs to start from beginning of match. search_position = match_results[0].first - begin; } } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. 
if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); b.commit(s.read_some(b.prepare(bytes_to_read), ec)); if (ec) return 0; } } #endif // defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), match_condition, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, asio::error_code& ec, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = b.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = match_condition(start_pos, end); if (result.second) { // Full match. We're done. ec = asio::error_code(); return result.first - begin; } else if (result.first != end) { // Partial match. Next search needs to start from beginning of match. search_position = result.first - begin; } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. 
if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); b.commit(s.read_some(b.prepare(bytes_to_read), ec)); if (ec) return 0; } } #if !defined(ASIO_NO_IOSTREAM) template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim) { return read_until(s, basic_streambuf_ref(b), delim); } template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim, asio::error_code& ec) { return read_until(s, basic_streambuf_ref(b), delim, ec); } template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim) { return read_until(s, basic_streambuf_ref(b), delim); } template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec) { return read_until(s, basic_streambuf_ref(b), delim, ec); } #if defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr) { return read_until(s, basic_streambuf_ref(b), expr); } template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, asio::error_code& ec) { return read_until(s, basic_streambuf_ref(b), expr, ec); } #endif // defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, typename enable_if::value>::type*) { return read_until(s, basic_streambuf_ref(b), match_condition); } template inline std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, asio::error_code& ec, typename enable_if::value>::type*) { return read_until(s, basic_streambuf_ref(b), match_condition, ec); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // 
!defined(ASIO_NO_DYNAMIC_BUFFER_V1) template inline std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, char delim, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, char delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { DynamicBuffer_v2& b = buffers; std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(b).data(0, b.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. iterator iter = std::find(start_pos, end, delim); if (iter != end) { // Found a match. We're done. ec = asio::error_code(); return iter - begin + 1; } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. 
std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); std::size_t pos = b.size(); b.grow(bytes_to_read); std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec); b.shrink(bytes_to_read - bytes_transferred); if (ec) return 0; } } template inline std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { DynamicBuffer_v2& b = buffers; std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(b).data(0, b.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = detail::partial_search( start_pos, end, delim.begin(), delim.end()); if (result.first != end) { if (result.second) { // Full match. We're done. ec = asio::error_code(); return result.first - begin + delim.length(); } else { // Partial match. Next search needs to start from beginning of match. search_position = result.first - begin; } } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. 
std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); std::size_t pos = b.size(); b.grow(bytes_to_read); std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec); b.shrink(bytes_to_read - bytes_transferred); if (ec) return 0; } } #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), expr, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { DynamicBuffer_v2& b = buffers; std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(b).data(0, b.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. boost::match_results >::allocator_type> match_results; if (regex_search(start_pos, end, match_results, expr, boost::match_default | boost::match_partial)) { if (match_results[0].matched) { // Full match. We're done. ec = asio::error_code(); return match_results[0].second - begin; } else { // Partial match. Next search needs to start from beginning of match. search_position = match_results[0].first - begin; } } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. 
if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); std::size_t pos = b.size(); b.grow(bytes_to_read); std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec); b.shrink(bytes_to_read - bytes_transferred); if (ec) return 0; } } #endif // defined(ASIO_HAS_BOOST_REGEX) template inline std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read_until(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), match_condition, ec); asio::detail::throw_error(ec, "read_until"); return bytes_transferred; } template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, asio::error_code& ec, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type*) { DynamicBuffer_v2& b = buffers; std::size_t search_position = 0; for (;;) { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(b).data(0, b.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = match_condition(start_pos, end); if (result.second) { // Full match. We're done. ec = asio::error_code(); return result.first - begin; } else if (result.first != end) { // Partial match. Next search needs to start from beginning of match. search_position = result.first - begin; } else { // No match. Next search can start with the new data. search_position = end - begin; } // Check if buffer is full. 
if (b.size() == b.max_size()) { ec = error::not_found; return 0; } // Need more data. std::size_t bytes_to_read = std::min( std::max(512, b.capacity() - b.size()), std::min(65536, b.max_size() - b.size())); std::size_t pos = b.size(); b.grow(bytes_to_read); std::size_t bytes_transferred = s.read_some(b.data(pos, bytes_to_read), ec); b.shrink(bytes_to_read - bytes_transferred); if (ec) return 0; } } #endif // !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class read_until_delim_op_v1 { public: template read_until_delim_op_v1(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, char delim, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), delim_(delim), start_(0), search_position_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_delim_op_v1(const read_until_delim_op_v1& other) : stream_(other.stream_), buffers_(other.buffers_), delim_(other.delim_), start_(other.start_), search_position_(other.search_position_), handler_(other.handler_) { } read_until_delim_op_v1(read_until_delim_op_v1&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), delim_(other.delim_), start_(other.start_), search_position_(other.search_position_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t bytes_to_read; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. 
typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = buffers_.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. iterator iter = std::find(start_pos, end, delim_); if (iter != end) { // Found a match. We're done. search_position_ = iter - begin + 1; bytes_to_read = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read = 0; } // Need to read some more data. else { // Next search can start with the new data. search_position_ = end - begin; bytes_to_read = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read == 0) break; // Start a new asynchronous read op_v1eration to obtain more data. stream_.async_read_some(buffers_.prepare(bytes_to_read), ASIO_MOVE_CAST(read_until_delim_op_v1)(*this)); return; default: buffers_.commit(bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 
0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v1 buffers_; char delim_; int start_; std::size_t search_position_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_delim_op_v1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_delim_op_v1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_delim_op_v1* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_delim_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_delim_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_delim_v1 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_delim_op_v1::type, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_delim_op_v1, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_delim_op_v1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_delim_op_v1, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_delim_op_v1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_initiate( detail::initiate_async_read_until_delim_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim); } namespace detail { template class read_until_delim_string_op_v1 { public: template read_until_delim_string_op_v1(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, const std::string& delim, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), delim_(delim), start_(0), search_position_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_delim_string_op_v1(const read_until_delim_string_op_v1& other) : stream_(other.stream_), buffers_(other.buffers_), delim_(other.delim_), start_(other.start_), 
search_position_(other.search_position_), handler_(other.handler_) { } read_until_delim_string_op_v1(read_until_delim_string_op_v1&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), delim_(ASIO_MOVE_CAST(std::string)(other.delim_)), start_(other.start_), search_position_(other.search_position_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t bytes_to_read; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = buffers_.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = detail::partial_search( start_pos, end, delim_.begin(), delim_.end()); if (result.first != end && result.second) { // Full match. We're done. search_position_ = result.first - begin + delim_.length(); bytes_to_read = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read = 0; } // Need to read some more data. else { if (result.first != end) { // Partial match. Next search needs to start from beginning of // match. search_position_ = result.first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read == 0) break; // Start a new asynchronous read op_v1eration to obtain more data. 
stream_.async_read_some(buffers_.prepare(bytes_to_read), ASIO_MOVE_CAST(read_until_delim_string_op_v1)(*this)); return; default: buffers_.commit(bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v1 buffers_; std::string delim_; int start_; std::size_t search_position_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_delim_string_op_v1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_delim_string_op_v1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_delim_string_op_v1* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_delim_string_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_delim_string_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_delim_string_v1 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const std::string& delim) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_delim_string_op_v1::type, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), delim, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_delim_string_op_v1, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_delim_string_op_v1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_delim_string_op_v1, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_delim_string_op_v1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_initiate( detail::initiate_async_read_until_delim_string_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), static_cast(delim)); } #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) namespace detail { template class read_until_expr_op_v1 { public: template read_until_expr_op_v1(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, const boost::regex& expr, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), expr_(expr), start_(0), search_position_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_expr_op_v1(const read_until_expr_op_v1& other) : 
stream_(other.stream_), buffers_(other.buffers_), expr_(other.expr_), start_(other.start_), search_position_(other.search_position_), handler_(other.handler_) { } read_until_expr_op_v1(read_until_expr_op_v1&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), expr_(other.expr_), start_(other.start_), search_position_(other.search_position_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t bytes_to_read; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = buffers_.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. boost::match_results >::allocator_type> match_results; bool match = regex_search(start_pos, end, match_results, expr_, boost::match_default | boost::match_partial); if (match && match_results[0].matched) { // Full match. We're done. search_position_ = match_results[0].second - begin; bytes_to_read = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read = 0; } // Need to read some more data. else { if (match) { // Partial match. Next search needs to start from beginning of // match. search_position_ = match_results[0].first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. 
if (!start && bytes_to_read == 0) break; // Start a new asynchronous read op_v1eration to obtain more data. stream_.async_read_some(buffers_.prepare(bytes_to_read), ASIO_MOVE_CAST(read_until_expr_op_v1)(*this)); return; default: buffers_.commit(bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v1 buffers_; RegEx expr_; int start_; std::size_t search_position_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_expr_op_v1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_expr_op_v1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_expr_op_v1* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_expr_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_expr_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_expr_v1 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const RegEx& expr) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_expr_op_v1::type, RegEx, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), expr, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_expr_op_v1, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_expr_op_v1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_expr_op_v1, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_expr_op_v1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_initiate( detail::initiate_async_read_until_expr_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), expr); } #endif // defined(ASIO_HAS_BOOST_REGEX) namespace detail { template class read_until_match_op_v1 { public: template read_until_match_op_v1(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, MatchCondition match_condition, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), match_condition_(match_condition), start_(0), search_position_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_match_op_v1(const read_until_match_op_v1& other) : stream_(other.stream_), buffers_(other.buffers_), 
match_condition_(other.match_condition_), start_(other.start_), search_position_(other.search_position_), handler_(other.handler_) { } read_until_match_op_v1(read_until_match_op_v1&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), match_condition_(other.match_condition_), start_(other.start_), search_position_(other.search_position_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t bytes_to_read; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v1::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = buffers_.data(); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = match_condition_(start_pos, end); if (result.second) { // Full match. We're done. search_position_ = result.first - begin; bytes_to_read = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read = 0; } // Need to read some more data. else { if (result.first != end) { // Partial match. Next search needs to start from beginning of // match. search_position_ = result.first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read == 0) break; // Start a new asynchronous read op_v1eration to obtain more data. 
stream_.async_read_some(buffers_.prepare(bytes_to_read), ASIO_MOVE_CAST(read_until_match_op_v1)(*this)); return; default: buffers_.commit(bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v1 buffers_; MatchCondition match_condition_; int start_; std::size_t search_position_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_match_op_v1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_match_op_v1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_match_op_v1* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_match_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_match_op_v1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_match_v1 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_match_op_v1::type, MatchCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), match_condition, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_match_op_v1, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_match_op_v1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_match_op_v1, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_match_op_v1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_initiate( detail::initiate_async_read_until_match_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), match_condition); } #if !defined(ASIO_NO_IOSTREAM) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, char delim, ASIO_MOVE_ARG(ReadHandler) handler) { return async_read_until(s, basic_streambuf_ref(b), delim, ASIO_MOVE_CAST(ReadHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim, 
ASIO_MOVE_ARG(ReadHandler) handler) { return async_read_until(s, basic_streambuf_ref(b), delim, ASIO_MOVE_CAST(ReadHandler)(handler)); } #if defined(ASIO_HAS_BOOST_REGEX) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler) { return async_read_until(s, basic_streambuf_ref(b), expr, ASIO_MOVE_CAST(ReadHandler)(handler)); } #endif // defined(ASIO_HAS_BOOST_REGEX) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if::value>::type*) { return async_read_until(s, basic_streambuf_ref(b), match_condition, ASIO_MOVE_CAST(ReadHandler)(handler)); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class read_until_delim_op_v2 { public: template read_until_delim_op_v2(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, char delim, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), delim_(delim), start_(0), search_position_(0), bytes_to_read_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_delim_op_v2(const read_until_delim_op_v2& other) : stream_(other.stream_), buffers_(other.buffers_), delim_(other.delim_), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(other.handler_) { } read_until_delim_op_v2(read_until_delim_op_v2&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), delim_(other.delim_), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), 
handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t pos; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(buffers_).data( 0, buffers_.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. iterator iter = std::find(start_pos, end, delim_); if (iter != end) { // Found a match. We're done. search_position_ = iter - begin + 1; bytes_to_read_ = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read_ = 0; } // Need to read some more data. else { // Next search can start with the new data. search_position_ = end - begin; bytes_to_read_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read_ == 0) break; // Start a new asynchronous read op_v2eration to obtain more data. pos = buffers_.size(); buffers_.grow(bytes_to_read_); stream_.async_read_some(buffers_.data(pos, bytes_to_read_), ASIO_MOVE_CAST(read_until_delim_op_v2)(*this)); return; default: buffers_.shrink(bytes_to_read_ - bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 
0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v2 buffers_; char delim_; int start_; std::size_t search_position_; std::size_t bytes_to_read_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_delim_op_v2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_delim_op_v2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_delim_op_v2* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_delim_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_delim_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_delim_v2 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, char delim) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_delim_op_v2::type, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_delim_op_v2, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_delim_op_v2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_delim_op_v2, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_delim_op_v2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, char delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return async_initiate( detail::initiate_async_read_until_delim_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim); } namespace detail { template class read_until_delim_string_op_v2 { public: template read_until_delim_string_op_v2(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, const std::string& delim, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), delim_(delim), start_(0), search_position_(0), bytes_to_read_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_delim_string_op_v2(const read_until_delim_string_op_v2& other) : stream_(other.stream_), buffers_(other.buffers_), delim_(other.delim_), start_(other.start_), search_position_(other.search_position_), 
bytes_to_read_(other.bytes_to_read_), handler_(other.handler_) { } read_until_delim_string_op_v2(read_until_delim_string_op_v2&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), delim_(ASIO_MOVE_CAST(std::string)(other.delim_)), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t pos; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(buffers_).data( 0, buffers_.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = detail::partial_search( start_pos, end, delim_.begin(), delim_.end()); if (result.first != end && result.second) { // Full match. We're done. search_position_ = result.first - begin + delim_.length(); bytes_to_read_ = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read_ = 0; } // Need to read some more data. else { if (result.first != end) { // Partial match. Next search needs to start from beginning of // match. search_position_ = result.first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. 
if (!start && bytes_to_read_ == 0) break; // Start a new asynchronous read op_v2eration to obtain more data. pos = buffers_.size(); buffers_.grow(bytes_to_read_); stream_.async_read_some(buffers_.data(pos, bytes_to_read_), ASIO_MOVE_CAST(read_until_delim_string_op_v2)(*this)); return; default: buffers_.shrink(bytes_to_read_ - bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v2 buffers_; std::string delim_; int start_; std::size_t search_position_; std::size_t bytes_to_read_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_delim_string_op_v2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_delim_string_op_v2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_delim_string_op_v2* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_delim_string_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_delim_string_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_delim_string_v2 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, const std::string& delim) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_delim_string_op_v2::type, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), delim, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_delim_string_op_v2, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_delim_string_op_v2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_delim_string_op_v2, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_delim_string_op_v2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim, ASIO_MOVE_ARG(ReadHandler) handler, typename 
enable_if< is_dynamic_buffer_v2::value >::type*) { return async_initiate( detail::initiate_async_read_until_delim_string_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), static_cast(delim)); } #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) namespace detail { template class read_until_expr_op_v2 { public: template read_until_expr_op_v2(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, const boost::regex& expr, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), expr_(expr), start_(0), search_position_(0), bytes_to_read_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_expr_op_v2(const read_until_expr_op_v2& other) : stream_(other.stream_), buffers_(other.buffers_), expr_(other.expr_), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(other.handler_) { } read_until_expr_op_v2(read_until_expr_op_v2&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), expr_(other.expr_), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t pos; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(buffers_).data( 0, buffers_.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. 
boost::match_results >::allocator_type> match_results; bool match = regex_search(start_pos, end, match_results, expr_, boost::match_default | boost::match_partial); if (match && match_results[0].matched) { // Full match. We're done. search_position_ = match_results[0].second - begin; bytes_to_read_ = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read_ = 0; } // Need to read some more data. else { if (match) { // Partial match. Next search needs to start from beginning of // match. search_position_ = match_results[0].first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read_ == 0) break; // Start a new asynchronous read op_v2eration to obtain more data. pos = buffers_.size(); buffers_.grow(bytes_to_read_); stream_.async_read_some(buffers_.data(pos, bytes_to_read_), ASIO_MOVE_CAST(read_until_expr_op_v2)(*this)); return; default: buffers_.shrink(bytes_to_read_ - bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 
0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v2 buffers_; RegEx expr_; int start_; std::size_t search_position_; std::size_t bytes_to_read_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_expr_op_v2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_expr_op_v2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_expr_op_v2* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_expr_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_expr_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_expr_v2 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, const RegEx& expr) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_expr_op_v2::type, RegEx, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), expr, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_expr_op_v2, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_expr_op_v2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_expr_op_v2, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_expr_op_v2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return async_initiate( detail::initiate_async_read_until_expr_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), expr); } #endif // defined(ASIO_HAS_BOOST_REGEX) namespace detail { template class read_until_match_op_v2 { public: template read_until_match_op_v2(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, MatchCondition match_condition, ReadHandler& handler) : stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), match_condition_(match_condition), start_(0), search_position_(0), bytes_to_read_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_until_match_op_v2(const read_until_match_op_v2& other) : stream_(other.stream_), buffers_(other.buffers_), match_condition_(other.match_condition_), 
start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(other.handler_) { } read_until_match_op_v2(read_until_match_op_v2&& other) : stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), match_condition_(other.match_condition_), start_(other.start_), search_position_(other.search_position_), bytes_to_read_(other.bytes_to_read_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { const std::size_t not_found = (std::numeric_limits::max)(); std::size_t pos; switch (start_ = start) { case 1: for (;;) { { // Determine the range of the data to be searched. typedef typename DynamicBuffer_v2::const_buffers_type buffers_type; typedef buffers_iterator iterator; buffers_type data_buffers = const_cast(buffers_).data( 0, buffers_.size()); iterator begin = iterator::begin(data_buffers); iterator start_pos = begin + search_position_; iterator end = iterator::end(data_buffers); // Look for a match. std::pair result = match_condition_(start_pos, end); if (result.second) { // Full match. We're done. search_position_ = result.first - begin; bytes_to_read_ = 0; } // No match yet. Check if buffer is full. else if (buffers_.size() == buffers_.max_size()) { search_position_ = not_found; bytes_to_read_ = 0; } // Need to read some more data. else { if (result.first != end) { // Partial match. Next search needs to start from beginning of // match. search_position_ = result.first - begin; } else { // Next search can start with the new data. search_position_ = end - begin; } bytes_to_read_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(65536, buffers_.max_size() - buffers_.size())); } } // Check if we're done. if (!start && bytes_to_read_ == 0) break; // Start a new asynchronous read op_v2eration to obtain more data. 
pos = buffers_.size(); buffers_.grow(bytes_to_read_); stream_.async_read_some(buffers_.data(pos, bytes_to_read_), ASIO_MOVE_CAST(read_until_match_op_v2)(*this)); return; default: buffers_.shrink(bytes_to_read_ - bytes_transferred); if (ec || bytes_transferred == 0) break; } const asio::error_code result_ec = (search_position_ == not_found) ? error::not_found : ec; const std::size_t result_n = (ec || search_position_ == not_found) ? 0 : search_position_; handler_(result_ec, result_n); } } //private: AsyncReadStream& stream_; DynamicBuffer_v2 buffers_; MatchCondition match_condition_; int start_; std::size_t search_position_; std::size_t bytes_to_read_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_until_match_op_v2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_until_match_op_v2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_until_match_op_v2* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_until_match_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_until_match_op_v2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_until_match_v2 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, MatchCondition match_condition) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); read_until_match_op_v2::type, MatchCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), match_condition, handler2.value)(asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_until_match_op_v2, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_until_match_op_v2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_until_match_op_v2, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_until_match_op_v2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type*) { return async_initiate( detail::initiate_async_read_until_match_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), match_condition); } #endif // !defined(ASIO_NO_EXTENSIONS) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_READ_UNTIL_HPP galera-4-26.4.25/asio/asio/impl/co_spawn.hpp000644 000164 177776 00000007344 15107057155 021675 0ustar00jenkinsnogroup000000 000000 // // impl/co_spawn.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_CO_SPAWN_HPP #define ASIO_IMPL_CO_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/awaitable.hpp" #include "asio/dispatch.hpp" #include "asio/post.hpp" #include "asio/use_awaitable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template awaitable co_spawn_entry_point( awaitable*, Executor ex, F f, Handler handler) { auto spawn_work = make_work_guard(ex); auto handler_work = make_work_guard(handler, ex); (void) co_await (post)(spawn_work.get_executor(), use_awaitable_t{}); bool done = false; try { T t = co_await f(); done = true; (dispatch)(handler_work.get_executor(), [handler = std::move(handler), t = std::move(t)]() mutable { handler(std::exception_ptr(), std::move(t)); }); } catch (...) { if (done) throw; (dispatch)(handler_work.get_executor(), [handler = std::move(handler), e = std::current_exception()]() mutable { handler(e, T()); }); } } template awaitable co_spawn_entry_point( awaitable*, Executor ex, F f, Handler handler) { auto spawn_work = make_work_guard(ex); auto handler_work = make_work_guard(handler, ex); (void) co_await (post)(spawn_work.get_executor(), use_awaitable_t{}); std::exception_ptr e = nullptr; try { co_await f(); } catch (...) 
{ e = std::current_exception(); } (dispatch)(handler_work.get_executor(), [handler = std::move(handler), e]() mutable { handler(e); }); } struct initiate_co_spawn { template void operator()(Handler&& handler, const Executor& ex, F&& f) const { typedef typename result_of::type awaitable_type; typedef typename awaitable_type::executor_type executor_type; executor_type ex2(ex); auto a = (co_spawn_entry_point)(static_cast(nullptr), ex2, std::forward(f), std::forward(handler)); awaitable_handler(std::move(a), ex2).launch(); } }; } // namespace detail template inline ASIO_INITFN_RESULT_TYPE(CompletionToken, typename detail::awaitable_signature::type>::type) co_spawn(const Executor& ex, F&& f, CompletionToken&& token, typename enable_if< is_executor::value >::type*) { return async_initiate::type>>( detail::initiate_co_spawn(), token, ex, std::forward(f)); } template inline ASIO_INITFN_RESULT_TYPE(CompletionToken, typename detail::awaitable_signature::type>::type) co_spawn(ExecutionContext& ctx, F&& f, CompletionToken&& token, typename enable_if< is_convertible::value >::type*) { return (co_spawn)(ctx.get_executor(), std::forward(f), std::forward(token)); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_CO_SPAWN_HPP galera-4-26.4.25/asio/asio/impl/src.hpp000644 000164 177776 00000006353 15107057155 020652 0ustar00jenkinsnogroup000000 000000 // // impl/src.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SRC_HPP #define ASIO_IMPL_SRC_HPP #define ASIO_SOURCE #include "asio/detail/config.hpp" #if defined(ASIO_HEADER_ONLY) # error Do not compile Asio library source with ASIO_HEADER_ONLY defined #endif #include "asio/impl/error.ipp" #include "asio/impl/error_code.ipp" #include "asio/impl/execution_context.ipp" #include "asio/impl/executor.ipp" #include "asio/impl/handler_alloc_hook.ipp" #include "asio/impl/io_context.ipp" #include "asio/impl/serial_port_base.ipp" #include "asio/impl/system_context.ipp" #include "asio/impl/thread_pool.ipp" #include "asio/detail/impl/buffer_sequence_adapter.ipp" #include "asio/detail/impl/descriptor_ops.ipp" #include "asio/detail/impl/dev_poll_reactor.ipp" #include "asio/detail/impl/epoll_reactor.ipp" #include "asio/detail/impl/eventfd_select_interrupter.ipp" #include "asio/detail/impl/handler_tracking.ipp" #include "asio/detail/impl/kqueue_reactor.ipp" #include "asio/detail/impl/null_event.ipp" #include "asio/detail/impl/pipe_select_interrupter.ipp" #include "asio/detail/impl/posix_event.ipp" #include "asio/detail/impl/posix_mutex.ipp" #include "asio/detail/impl/posix_thread.ipp" #include "asio/detail/impl/posix_tss_ptr.ipp" #include "asio/detail/impl/reactive_descriptor_service.ipp" #include "asio/detail/impl/reactive_serial_port_service.ipp" #include "asio/detail/impl/reactive_socket_service_base.ipp" #include "asio/detail/impl/resolver_service_base.ipp" #include "asio/detail/impl/scheduler.ipp" #include "asio/detail/impl/select_reactor.ipp" #include "asio/detail/impl/service_registry.ipp" #include "asio/detail/impl/signal_set_service.ipp" #include "asio/detail/impl/socket_ops.ipp" #include "asio/detail/impl/socket_select_interrupter.ipp" #include "asio/detail/impl/strand_executor_service.ipp" #include "asio/detail/impl/strand_service.ipp" #include "asio/detail/impl/throw_error.ipp" #include 
"asio/detail/impl/timer_queue_ptime.ipp" #include "asio/detail/impl/timer_queue_set.ipp" #include "asio/detail/impl/win_iocp_handle_service.ipp" #include "asio/detail/impl/win_iocp_io_context.ipp" #include "asio/detail/impl/win_iocp_serial_port_service.ipp" #include "asio/detail/impl/win_iocp_socket_service_base.ipp" #include "asio/detail/impl/win_event.ipp" #include "asio/detail/impl/win_mutex.ipp" #include "asio/detail/impl/win_object_handle_service.ipp" #include "asio/detail/impl/win_static_mutex.ipp" #include "asio/detail/impl/win_thread.ipp" #include "asio/detail/impl/win_tss_ptr.ipp" #include "asio/detail/impl/winrt_ssocket_service_base.ipp" #include "asio/detail/impl/winrt_timer_scheduler.ipp" #include "asio/detail/impl/winsock_init.ipp" #include "asio/generic/detail/impl/endpoint.ipp" #include "asio/ip/impl/address.ipp" #include "asio/ip/impl/address_v4.ipp" #include "asio/ip/impl/address_v6.ipp" #include "asio/ip/impl/host_name.ipp" #include "asio/ip/impl/network_v4.ipp" #include "asio/ip/impl/network_v6.ipp" #include "asio/ip/detail/impl/endpoint.ipp" #include "asio/local/detail/impl/endpoint.ipp" #endif // ASIO_IMPL_SRC_HPP galera-4-26.4.25/asio/asio/impl/handler_alloc_hook.ipp000644 000164 177776 00000003031 15107057155 023661 0ustar00jenkinsnogroup000000 000000 // // impl/handler_alloc_hook.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP #define ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/thread_context.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/detail/push_options.hpp" namespace asio { void* asio_handler_allocate(std::size_t size, ...) { #if !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) return detail::thread_info_base::allocate( detail::thread_context::thread_call_stack::top(), size); #else // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) return ::operator new(size); #endif // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) } void asio_handler_deallocate(void* pointer, std::size_t size, ...) { #if !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) detail::thread_info_base::deallocate( detail::thread_context::thread_call_stack::top(), pointer, size); #else // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) (void)size; ::operator delete(pointer); #endif // !defined(ASIO_DISABLE_SMALL_BLOCK_RECYCLING) } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_HANDLER_ALLOC_HOOK_IPP galera-4-26.4.25/asio/asio/impl/serial_port_base.ipp000644 000164 177776 00000030624 15107057155 023377 0ustar00jenkinsnogroup000000 000000 // // impl/serial_port_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SERIAL_PORT_BASE_IPP #define ASIO_IMPL_SERIAL_PORT_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/error.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/throw_exception.hpp" #if defined(GENERATING_DOCUMENTATION) # define ASIO_OPTION_STORAGE implementation_defined #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # define ASIO_OPTION_STORAGE DCB #else # define ASIO_OPTION_STORAGE termios #endif #include "asio/detail/push_options.hpp" namespace asio { ASIO_SYNC_OP_VOID serial_port_base::baud_rate::store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) storage.BaudRate = value_; #else speed_t baud; switch (value_) { // Do POSIX-specified rates first. case 0: baud = B0; break; case 50: baud = B50; break; case 75: baud = B75; break; case 110: baud = B110; break; case 134: baud = B134; break; case 150: baud = B150; break; case 200: baud = B200; break; case 300: baud = B300; break; case 600: baud = B600; break; case 1200: baud = B1200; break; case 1800: baud = B1800; break; case 2400: baud = B2400; break; case 4800: baud = B4800; break; case 9600: baud = B9600; break; case 19200: baud = B19200; break; case 38400: baud = B38400; break; // And now the extended ones conditionally. 
# ifdef B7200 case 7200: baud = B7200; break; # endif # ifdef B14400 case 14400: baud = B14400; break; # endif # ifdef B57600 case 57600: baud = B57600; break; # endif # ifdef B115200 case 115200: baud = B115200; break; # endif # ifdef B230400 case 230400: baud = B230400; break; # endif # ifdef B460800 case 460800: baud = B460800; break; # endif # ifdef B500000 case 500000: baud = B500000; break; # endif # ifdef B576000 case 576000: baud = B576000; break; # endif # ifdef B921600 case 921600: baud = B921600; break; # endif # ifdef B1000000 case 1000000: baud = B1000000; break; # endif # ifdef B1152000 case 1152000: baud = B1152000; break; # endif # ifdef B2000000 case 2000000: baud = B2000000; break; # endif # ifdef B3000000 case 3000000: baud = B3000000; break; # endif # ifdef B3500000 case 3500000: baud = B3500000; break; # endif # ifdef B4000000 case 4000000: baud = B4000000; break; # endif default: ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } # if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) ::cfsetspeed(&storage, baud); # else ::cfsetispeed(&storage, baud); ::cfsetospeed(&storage, baud); # endif #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID serial_port_base::baud_rate::load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) value_ = storage.BaudRate; #else speed_t baud = ::cfgetospeed(&storage); switch (baud) { // First do those specified by POSIX. 
case B0: value_ = 0; break; case B50: value_ = 50; break; case B75: value_ = 75; break; case B110: value_ = 110; break; case B134: value_ = 134; break; case B150: value_ = 150; break; case B200: value_ = 200; break; case B300: value_ = 300; break; case B600: value_ = 600; break; case B1200: value_ = 1200; break; case B1800: value_ = 1800; break; case B2400: value_ = 2400; break; case B4800: value_ = 4800; break; case B9600: value_ = 9600; break; case B19200: value_ = 19200; break; case B38400: value_ = 38400; break; // Now conditionally handle a bunch of extended rates. # ifdef B7200 case B7200: value_ = 7200; break; # endif # ifdef B14400 case B14400: value_ = 14400; break; # endif # ifdef B57600 case B57600: value_ = 57600; break; # endif # ifdef B115200 case B115200: value_ = 115200; break; # endif # ifdef B230400 case B230400: value_ = 230400; break; # endif # ifdef B460800 case B460800: value_ = 460800; break; # endif # ifdef B500000 case B500000: value_ = 500000; break; # endif # ifdef B576000 case B576000: value_ = 576000; break; # endif # ifdef B921600 case B921600: value_ = 921600; break; # endif # ifdef B1000000 case B1000000: value_ = 1000000; break; # endif # ifdef B1152000 case B1152000: value_ = 1152000; break; # endif # ifdef B2000000 case B2000000: value_ = 2000000; break; # endif # ifdef B3000000 case B3000000: value_ = 3000000; break; # endif # ifdef B3500000 case B3500000: value_ = 3500000; break; # endif # ifdef B4000000 case B4000000: value_ = 4000000; break; # endif default: value_ = 0; ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } serial_port_base::flow_control::flow_control( serial_port_base::flow_control::type t) : value_(t) { if (t != none && t != software && t != hardware) { std::out_of_range ex("invalid flow_control value"); asio::detail::throw_exception(ex); } } ASIO_SYNC_OP_VOID serial_port_base::flow_control::store( ASIO_OPTION_STORAGE& storage, 
asio::error_code& ec) const { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) storage.fOutxCtsFlow = FALSE; storage.fOutxDsrFlow = FALSE; storage.fTXContinueOnXoff = TRUE; storage.fDtrControl = DTR_CONTROL_ENABLE; storage.fDsrSensitivity = FALSE; storage.fOutX = FALSE; storage.fInX = FALSE; storage.fRtsControl = RTS_CONTROL_ENABLE; switch (value_) { case none: break; case software: storage.fOutX = TRUE; storage.fInX = TRUE; break; case hardware: storage.fOutxCtsFlow = TRUE; storage.fRtsControl = RTS_CONTROL_HANDSHAKE; break; default: break; } #else switch (value_) { case none: storage.c_iflag &= ~(IXOFF | IXON); # if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) storage.c_cflag &= ~CRTSCTS; # elif defined(__QNXNTO__) storage.c_cflag &= ~(IHFLOW | OHFLOW); # endif break; case software: storage.c_iflag |= IXOFF | IXON; # if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) storage.c_cflag &= ~CRTSCTS; # elif defined(__QNXNTO__) storage.c_cflag &= ~(IHFLOW | OHFLOW); # endif break; case hardware: # if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) storage.c_iflag &= ~(IXOFF | IXON); storage.c_cflag |= CRTSCTS; break; # elif defined(__QNXNTO__) storage.c_iflag &= ~(IXOFF | IXON); storage.c_cflag |= (IHFLOW | OHFLOW); break; # else ec = asio::error::operation_not_supported; ASIO_SYNC_OP_VOID_RETURN(ec); # endif default: break; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID serial_port_base::flow_control::load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (storage.fOutX && storage.fInX) { value_ = software; } else if (storage.fOutxCtsFlow && storage.fRtsControl == RTS_CONTROL_HANDSHAKE) { value_ = hardware; } else { value_ = none; } #else if (storage.c_iflag & (IXOFF | IXON)) { value_ = software; } # if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) else if (storage.c_cflag & CRTSCTS) { value_ = hardware; } # elif defined(__QNXNTO__) else if (storage.c_cflag & 
IHFLOW && storage.c_cflag & OHFLOW) { value_ = hardware; } # endif else { value_ = none; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } serial_port_base::parity::parity(serial_port_base::parity::type t) : value_(t) { if (t != none && t != odd && t != even) { std::out_of_range ex("invalid parity value"); asio::detail::throw_exception(ex); } } ASIO_SYNC_OP_VOID serial_port_base::parity::store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) switch (value_) { case none: storage.fParity = FALSE; storage.Parity = NOPARITY; break; case odd: storage.fParity = TRUE; storage.Parity = ODDPARITY; break; case even: storage.fParity = TRUE; storage.Parity = EVENPARITY; break; default: break; } #else switch (value_) { case none: storage.c_iflag |= IGNPAR; storage.c_cflag &= ~(PARENB | PARODD); break; case even: storage.c_iflag &= ~(IGNPAR | PARMRK); storage.c_iflag |= INPCK; storage.c_cflag |= PARENB; storage.c_cflag &= ~PARODD; break; case odd: storage.c_iflag &= ~(IGNPAR | PARMRK); storage.c_iflag |= INPCK; storage.c_cflag |= (PARENB | PARODD); break; default: break; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID serial_port_base::parity::load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (storage.Parity == EVENPARITY) { value_ = even; } else if (storage.Parity == ODDPARITY) { value_ = odd; } else { value_ = none; } #else if (storage.c_cflag & PARENB) { if (storage.c_cflag & PARODD) { value_ = odd; } else { value_ = even; } } else { value_ = none; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } serial_port_base::stop_bits::stop_bits( serial_port_base::stop_bits::type t) : value_(t) { if (t != one && t != onepointfive && t != two) { std::out_of_range ex("invalid stop_bits value"); asio::detail::throw_exception(ex); } } ASIO_SYNC_OP_VOID serial_port_base::stop_bits::store( 
ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) switch (value_) { case one: storage.StopBits = ONESTOPBIT; break; case onepointfive: storage.StopBits = ONE5STOPBITS; break; case two: storage.StopBits = TWOSTOPBITS; break; default: break; } #else switch (value_) { case one: storage.c_cflag &= ~CSTOPB; break; case two: storage.c_cflag |= CSTOPB; break; default: ec = asio::error::operation_not_supported; ASIO_SYNC_OP_VOID_RETURN(ec); } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID serial_port_base::stop_bits::load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (storage.StopBits == ONESTOPBIT) { value_ = one; } else if (storage.StopBits == ONE5STOPBITS) { value_ = onepointfive; } else if (storage.StopBits == TWOSTOPBITS) { value_ = two; } else { value_ = one; } #else value_ = (storage.c_cflag & CSTOPB) ? two : one; #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } serial_port_base::character_size::character_size(unsigned int t) : value_(t) { if (t < 5 || t > 8) { std::out_of_range ex("invalid character_size value"); asio::detail::throw_exception(ex); } } ASIO_SYNC_OP_VOID serial_port_base::character_size::store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) storage.ByteSize = value_; #else storage.c_cflag &= ~CSIZE; switch (value_) { case 5: storage.c_cflag |= CS5; break; case 6: storage.c_cflag |= CS6; break; case 7: storage.c_cflag |= CS7; break; case 8: storage.c_cflag |= CS8; break; default: break; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID serial_port_base::character_size::load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) value_ = storage.ByteSize; #else if ((storage.c_cflag & CSIZE) == CS5) { value_ = 5; } else if 
((storage.c_cflag & CSIZE) == CS6) { value_ = 6; } else if ((storage.c_cflag & CSIZE) == CS7) { value_ = 7; } else if ((storage.c_cflag & CSIZE) == CS8) { value_ = 8; } else { // Hmmm, use 8 for now. value_ = 8; } #endif ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } // namespace asio #include "asio/detail/pop_options.hpp" #undef ASIO_OPTION_STORAGE #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_IMPL_SERIAL_PORT_BASE_IPP galera-4-26.4.25/asio/asio/impl/use_awaitable.hpp000644 000164 177776 00000015553 15107057155 022672 0ustar00jenkinsnogroup000000 000000 // // impl/use_awaitable.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_USE_AWAITABLE_HPP #define ASIO_IMPL_USE_AWAITABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class awaitable_handler_base : public awaitable_thread { public: typedef void result_type; typedef awaitable awaitable_type; // Construct from the entry point of a new thread of execution. awaitable_handler_base(awaitable a, const Executor& ex) : awaitable_thread(std::move(a), ex) { } // Transfer ownership from another awaitable_thread. 
explicit awaitable_handler_base(awaitable_thread* h) : awaitable_thread(std::move(*h)) { } protected: awaitable_frame* frame() noexcept { return static_cast*>(this->top_of_stack_); } }; template class awaitable_handler; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; void operator()() { this->frame()->attach_thread(this); this->frame()->return_void(); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; void operator()(const asio::error_code& ec) { this->frame()->attach_thread(this); if (ec) this->frame()->set_error(ec); else this->frame()->return_void(); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; void operator()(std::exception_ptr ex) { this->frame()->attach_thread(this); if (ex) this->frame()->set_except(ex); else this->frame()->return_void(); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; template void operator()(Arg&& arg) { this->frame()->attach_thread(this); this->frame()->return_value(std::forward(arg)); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; template void operator()(const asio::error_code& ec, Arg&& arg) { this->frame()->attach_thread(this); if (ec) this->frame()->set_error(ec); else this->frame()->return_value(std::forward(arg)); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base { public: using awaitable_handler_base::awaitable_handler_base; template void operator()(std::exception_ptr ex, Arg&& arg) { 
this->frame()->attach_thread(this); if (ex) this->frame()->set_except(ex); else this->frame()->return_value(std::forward(arg)); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base> { public: using awaitable_handler_base>::awaitable_handler_base; template void operator()(Args&&... args) { this->frame()->attach_thread(this); this->frame()->return_values(std::forward(args)...); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base> { public: using awaitable_handler_base>::awaitable_handler_base; template void operator()(const asio::error_code& ec, Args&&... args) { this->frame()->attach_thread(this); if (ec) this->frame()->set_error(ec); else this->frame()->return_values(std::forward(args)...); this->frame()->pop_frame(); this->pump(); } }; template class awaitable_handler : public awaitable_handler_base> { public: using awaitable_handler_base>::awaitable_handler_base; template void operator()(std::exception_ptr ex, Args&&... args) { this->frame()->attach_thread(this); if (ex) this->frame()->set_except(ex); else this->frame()->return_values(std::forward(args)...); this->frame()->pop_frame(); this->pump(); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template class async_result, R(Args...)> { public: typedef typename detail::awaitable_handler< Executor, typename decay::type...> handler_type; typedef typename handler_type::awaitable_type return_type; #if defined(_MSC_VER) template static T dummy_return() { return std::move(*static_cast(nullptr)); } template <> static void dummy_return() { } #endif // defined(_MSC_VER) template static return_type initiate(Initiation initiation, use_awaitable_t, InitArgs... args) { co_await [&](auto* frame) { handler_type handler(frame->detach_thread()); std::move(initiation)(std::move(handler), std::move(args)...); return static_cast(nullptr); }; for (;;) {} // Never reached. 
#if defined(_MSC_VER) co_return dummy_return(); #endif // defined(_MSC_VER) } }; #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_USE_AWAITABLE_HPP galera-4-26.4.25/asio/asio/impl/read_at.hpp000644 000164 177776 00000057432 15107057155 021466 0ustar00jenkinsnogroup000000 000000 // // impl/read_at.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_READ_AT_HPP #define ASIO_IMPL_READ_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/base_from_completion_cond.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/consuming_buffers.hpp" #include "asio/detail/dependent_type.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template std::size_t read_at_buffer_sequence(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, const MutableBufferIterator&, CompletionCondition completion_condition, asio::error_code& ec) { ec = asio::error_code(); asio::detail::consuming_buffers tmp(buffers); while (!tmp.empty()) { if (std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, 
tmp.total_consumed()))) { tmp.consume(d.read_some_at(offset + tmp.total_consumed(), tmp.prepare(max_size), ec)); } else break; } return tmp.total_consumed();; } } // namespace detail template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec) { return detail::read_at_buffer_sequence(d, offset, buffers, asio::buffer_sequence_begin(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t bytes_transferred = read_at( d, offset, buffers, transfer_all(), ec); asio::detail::throw_error(ec, "read_at"); return bytes_transferred; } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { return read_at(d, offset, buffers, transfer_all(), ec); } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition) { asio::error_code ec; std::size_t bytes_transferred = read_at(d, offset, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "read_at"); return bytes_transferred; } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec) { ec = asio::error_code(); std::size_t total_transferred = 0; std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, total_transferred)); std::size_t bytes_available = read_size_helper(b, max_size); while (bytes_available > 0) { std::size_t bytes_transferred = d.read_some_at( offset + total_transferred, 
b.prepare(bytes_available), ec); b.commit(bytes_transferred); total_transferred += bytes_transferred; max_size = detail::adapt_completion_condition_result( completion_condition(ec, total_transferred)); bytes_available = read_size_helper(b, max_size); } return total_transferred; } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b) { asio::error_code ec; std::size_t bytes_transferred = read_at( d, offset, b, transfer_all(), ec); asio::detail::throw_error(ec, "read_at"); return bytes_transferred; } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b, asio::error_code& ec) { return read_at(d, offset, b, transfer_all(), ec); } template inline std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition) { asio::error_code ec; std::size_t bytes_transferred = read_at(d, offset, b, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "read_at"); return bytes_transferred; } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) namespace detail { template class read_at_op : detail::base_from_completion_cond { public: read_at_op(AsyncRandomAccessReadDevice& device, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition& completion_condition, ReadHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), device_(device), offset_(offset), buffers_(buffers), start_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_at_op(const read_at_op& other) : detail::base_from_completion_cond(other), device_(other.device_), offset_(other.offset_), buffers_(other.buffers_), start_(other.start_), handler_(other.handler_) { } read_at_op(read_at_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< 
CompletionCondition>)(other)), device_(other.device_), offset_(other.offset_), buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)), start_(other.start_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, buffers_.total_consumed()); do { device_.async_read_some_at( offset_ + buffers_.total_consumed(), buffers_.prepare(max_size), ASIO_MOVE_CAST(read_at_op)(*this)); return; default: buffers_.consume(bytes_transferred); if ((!ec && bytes_transferred == 0) || buffers_.empty()) break; max_size = this->check_for_completion(ec, buffers_.total_consumed()); } while (max_size > 0); handler_(ec, buffers_.total_consumed()); } } //private: typedef asio::detail::consuming_buffers buffers_type; AsyncRandomAccessReadDevice& device_; uint64_t offset_; buffers_type buffers_; int start_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_at_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_at_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_at_op* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_at_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_at_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void start_read_at_buffer_sequence_op(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, const MutableBufferIterator&, CompletionCondition& completion_condition, ReadHandler& handler) { detail::read_at_op( d, offset, buffers, completion_condition, handler)( asio::error_code(), 0, 1); } struct initiate_async_read_at_buffer_sequence { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncRandomAccessReadDevice* d, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); start_read_at_buffer_sequence_op(*d, offset, buffers, asio::buffer_sequence_begin(buffers), completion_cond2.value, handler2.value); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_at_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_at_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_at_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_at_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( detail::initiate_async_read_at_buffer_sequence(), handler, &d, offset, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( detail::initiate_async_read_at_buffer_sequence(), handler, &d, offset, buffers, transfer_all()); } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) namespace detail { template class read_at_streambuf_op : detail::base_from_completion_cond { public: read_at_streambuf_op(AsyncRandomAccessReadDevice& device, uint64_t offset, basic_streambuf& streambuf, 
CompletionCondition& completion_condition, ReadHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), device_(device), offset_(offset), streambuf_(streambuf), start_(0), total_transferred_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_at_streambuf_op(const read_at_streambuf_op& other) : detail::base_from_completion_cond(other), device_(other.device_), offset_(other.offset_), streambuf_(other.streambuf_), start_(other.start_), total_transferred_(other.total_transferred_), handler_(other.handler_) { } read_at_streambuf_op(read_at_streambuf_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), device_(other.device_), offset_(other.offset_), streambuf_(other.streambuf_), start_(other.start_), total_transferred_(other.total_transferred_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size, bytes_available; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, total_transferred_); bytes_available = read_size_helper(streambuf_, max_size); for (;;) { device_.async_read_some_at(offset_ + total_transferred_, streambuf_.prepare(bytes_available), ASIO_MOVE_CAST(read_at_streambuf_op)(*this)); return; default: total_transferred_ += bytes_transferred; streambuf_.commit(bytes_transferred); max_size = this->check_for_completion(ec, total_transferred_); bytes_available = read_size_helper(streambuf_, max_size); if ((!ec && bytes_transferred == 0) || bytes_available == 0) break; } handler_(ec, static_cast(total_transferred_)); } } //private: AsyncRandomAccessReadDevice& device_; uint64_t offset_; asio::basic_streambuf& streambuf_; int start_; std::size_t total_transferred_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t 
size, read_at_streambuf_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_at_streambuf_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_at_streambuf_op* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_at_streambuf_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_at_streambuf_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_at_streambuf { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncRandomAccessReadDevice* d, uint64_t offset, basic_streambuf* b, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); read_at_streambuf_op::type>( *d, offset, *b, completion_cond2.value, handler2.value)( asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_at_streambuf_op, Allocator1> { typedef typename associated_allocator::type type; static type get( const detail::read_at_streambuf_op& h, const Allocator1& a = Allocator1()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_at_streambuf_op, Executor1> { typedef typename associated_executor::type type; static type get( const detail::read_at_streambuf_op& h, const Executor1& ex = Executor1()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( detail::initiate_async_read_at_streambuf(), handler, &d, offset, &b, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, asio::basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( detail::initiate_async_read_at_streambuf(), handler, &d, offset, &b, transfer_all()); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_READ_AT_HPP galera-4-26.4.25/asio/asio/impl/awaitable.hpp000644 000164 177776 00000025632 15107057155 022015 0ustar00jenkinsnogroup000000 000000 // 
// impl/awaitable.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_AWAITABLE_HPP #define ASIO_IMPL_AWAITABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include "asio/detail/thread_context.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/detail/type_traits.hpp" #include "asio/post.hpp" #include "asio/system_error.hpp" #include "asio/this_coro.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // An awaitable_thread represents a thread-of-execution that is composed of one // or more "stack frames", with each frame represented by an awaitable_frame. // All execution occurs in the context of the awaitable_thread's executor. An // awaitable_thread continues to "pump" the stack frames by repeatedly resuming // the top stack frame until the stack is empty, or until ownership of the // stack is transferred to another awaitable_thread object. // // +------------------------------------+ // | top_of_stack_ | // | V // +--------------+---+ +-----------------+ // | | | | // | awaitable_thread |<---------------------------+ awaitable_frame | // | | attached_thread_ | | // +--------------+---+ (Set only when +---+-------------+ // | frames are being | // | actively pumped | caller_ // | by a thread, and | // | then only for V // | the top frame.) 
+-----------------+ // | | | // | | awaitable_frame | // | | | // | +---+-------------+ // | | // | | caller_ // | : // | : // | | // | V // | +-----------------+ // | bottom_of_stack_ | | // +------------------------------->| awaitable_frame | // | | // +-----------------+ template class awaitable_frame_base { public: #if !defined(ASIO_DISABLE_AWAITABLE_FRAME_RECYCLING) void* operator new(std::size_t size) { return asio::detail::thread_info_base::allocate( asio::detail::thread_info_base::awaitable_frame_tag(), asio::detail::thread_context::thread_call_stack::top(), size); } void operator delete(void* pointer, std::size_t size) { asio::detail::thread_info_base::deallocate( asio::detail::thread_info_base::awaitable_frame_tag(), asio::detail::thread_context::thread_call_stack::top(), pointer, size); } #endif // !defined(ASIO_DISABLE_AWAITABLE_FRAME_RECYCLING) // The frame starts in a suspended state until the awaitable_thread object // pumps the stack. auto initial_suspend() noexcept { return suspend_always(); } // On final suspension the frame is popped from the top of the stack. auto final_suspend() noexcept { struct result { awaitable_frame_base* this_; bool await_ready() const noexcept { return false; } void await_suspend(coroutine_handle) noexcept { this_->pop_frame(); } void await_resume() const noexcept { } }; return result{this}; } void set_except(std::exception_ptr e) noexcept { pending_exception_ = e; } void set_error(const asio::error_code& ec) { this->set_except(std::make_exception_ptr(asio::system_error(ec))); } void unhandled_exception() { set_except(std::current_exception()); } void rethrow_exception() { if (pending_exception_) { std::exception_ptr ex = std::exchange(pending_exception_, nullptr); std::rethrow_exception(ex); } } template auto await_transform(awaitable a) const { return a; } // This await transformation obtains the associated executor of the thread of // execution. 
auto await_transform(this_coro::executor_t) noexcept { struct result { awaitable_frame_base* this_; bool await_ready() const noexcept { return true; } void await_suspend(coroutine_handle) noexcept { } auto await_resume() const noexcept { return this_->attached_thread_->get_executor(); } }; return result{this}; } // This await transformation is used to run an async operation's initiation // function object after the coroutine has been suspended. This ensures that // immediate resumption of the coroutine in another thread does not cause a // race condition. template auto await_transform(Function f, typename enable_if< is_convertible< typename result_of::type, awaitable_thread* >::value >::type* = 0) { struct result { Function function_; awaitable_frame_base* this_; bool await_ready() const noexcept { return false; } void await_suspend(coroutine_handle) noexcept { function_(this_); } void await_resume() const noexcept { } }; return result{std::move(f), this}; } void attach_thread(awaitable_thread* handler) noexcept { attached_thread_ = handler; } awaitable_thread* detach_thread() noexcept { return std::exchange(attached_thread_, nullptr); } void push_frame(awaitable_frame_base* caller) noexcept { caller_ = caller; attached_thread_ = caller_->attached_thread_; attached_thread_->top_of_stack_ = this; caller_->attached_thread_ = nullptr; } void pop_frame() noexcept { if (caller_) caller_->attached_thread_ = attached_thread_; attached_thread_->top_of_stack_ = caller_; attached_thread_ = nullptr; caller_ = nullptr; } void resume() { coro_.resume(); } void destroy() { coro_.destroy(); } protected: coroutine_handle coro_ = nullptr; awaitable_thread* attached_thread_ = nullptr; awaitable_frame_base* caller_ = nullptr; std::exception_ptr pending_exception_ = nullptr; }; template class awaitable_frame : public awaitable_frame_base { public: awaitable_frame() noexcept { } awaitable_frame(awaitable_frame&& other) noexcept : awaitable_frame_base(std::move(other)) { } 
~awaitable_frame() { if (has_result_) static_cast(static_cast(result_))->~T(); } awaitable get_return_object() noexcept { this->coro_ = coroutine_handle::from_promise(*this); return awaitable(this); }; template void return_value(U&& u) { new (&result_) T(std::forward(u)); has_result_ = true; } template void return_values(Us&&... us) { this->return_value(std::forward_as_tuple(std::forward(us)...)); } T get() { this->caller_ = nullptr; this->rethrow_exception(); return std::move(*static_cast(static_cast(result_))); } private: alignas(T) unsigned char result_[sizeof(T)]; bool has_result_ = false; }; template class awaitable_frame : public awaitable_frame_base { public: awaitable get_return_object() { this->coro_ = coroutine_handle::from_promise(*this); return awaitable(this); }; void return_void() { } void get() { this->caller_ = nullptr; this->rethrow_exception(); } }; template class awaitable_thread { public: typedef Executor executor_type; // Construct from the entry point of a new thread of execution. awaitable_thread(awaitable p, const Executor& ex) : bottom_of_stack_(std::move(p)), top_of_stack_(bottom_of_stack_.frame_), executor_(ex) { } // Transfer ownership from another awaitable_thread. awaitable_thread(awaitable_thread&& other) noexcept : bottom_of_stack_(std::move(other.bottom_of_stack_)), top_of_stack_(std::exchange(other.top_of_stack_, nullptr)), executor_(std::move(other.executor_)) { } // Clean up with a last ditch effort to ensure the thread is unwound within // the context of the executor. ~awaitable_thread() { if (bottom_of_stack_.valid()) { // Coroutine "stack unwinding" must be performed through the executor. (post)(executor_, [a = std::move(bottom_of_stack_)]() mutable { awaitable(std::move(a)); }); } } executor_type get_executor() const noexcept { return executor_; } // Launch a new thread of execution. 
void launch() { top_of_stack_->attach_thread(this); pump(); } protected: template friend class awaitable_frame_base; // Repeatedly resume the top stack frame until the stack is empty or until it // has been transferred to another resumable_thread object. void pump() { do top_of_stack_->resume(); while (top_of_stack_); if (bottom_of_stack_.valid()) { awaitable a(std::move(bottom_of_stack_)); a.frame_->rethrow_exception(); } } awaitable bottom_of_stack_; awaitable_frame_base* top_of_stack_; executor_type executor_; }; } // namespace detail } // namespace asio #if !defined(GENERATING_DOCUMENTATION) namespace std { namespace experimental { template struct coroutine_traits, Args...> { typedef asio::detail::awaitable_frame promise_type; }; }} // namespace std::experimental #endif // !defined(GENERATING_DOCUMENTATION) #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_AWAITABLE_HPP galera-4-26.4.25/asio/asio/impl/buffered_write_stream.hpp000644 000164 177776 00000031777 15107057155 024442 0ustar00jenkinsnogroup000000 000000 // // impl/buffered_write_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP #define ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/push_options.hpp" namespace asio { template std::size_t buffered_write_stream::flush() { std::size_t bytes_written = write(next_layer_, buffer(storage_.data(), storage_.size())); storage_.consume(bytes_written); return bytes_written; } template std::size_t buffered_write_stream::flush(asio::error_code& ec) { std::size_t bytes_written = write(next_layer_, buffer(storage_.data(), storage_.size()), transfer_all(), ec); storage_.consume(bytes_written); return bytes_written; } namespace detail { template class buffered_flush_handler { public: buffered_flush_handler(detail::buffered_stream_storage& storage, WriteHandler& handler) : storage_(storage), handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) buffered_flush_handler(const buffered_flush_handler& other) : storage_(other.storage_), handler_(other.handler_) { } buffered_flush_handler(buffered_flush_handler&& other) : storage_(other.storage_), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, const std::size_t bytes_written) { storage_.consume(bytes_written); handler_(ec, bytes_written); } //private: detail::buffered_stream_storage& storage_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, buffered_flush_handler* this_handler) { return 
asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, buffered_flush_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( buffered_flush_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, buffered_flush_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, buffered_flush_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_buffered_flush { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, buffered_stream_storage* storage, Stream* next_layer) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); async_write(*next_layer, buffer(storage->data(), storage->size()), buffered_flush_handler::type>( *storage, handler2.value)); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::buffered_flush_handler, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::buffered_flush_handler& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::buffered_flush_handler, Executor> { typedef typename associated_executor::type type; static type get(const detail::buffered_flush_handler& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) buffered_write_stream::async_flush( ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_buffered_flush(), handler, &storage_, &next_layer_); } template template std::size_t buffered_write_stream::write_some( const ConstBufferSequence& buffers) { using asio::buffer_size; if (buffer_size(buffers) == 0) return 0; if (storage_.size() == storage_.capacity()) this->flush(); return this->copy(buffers); } template template std::size_t buffered_write_stream::write_some( const ConstBufferSequence& buffers, asio::error_code& ec) { ec = asio::error_code(); using asio::buffer_size; if (buffer_size(buffers) == 0) return 0; if (storage_.size() == storage_.capacity() && !flush(ec)) return 0; return this->copy(buffers); } namespace detail { template class buffered_write_some_handler { public: buffered_write_some_handler(detail::buffered_stream_storage& storage, const ConstBufferSequence& buffers, WriteHandler& handler) : storage_(storage), buffers_(buffers), 
handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) buffered_write_some_handler(const buffered_write_some_handler& other) : storage_(other.storage_), buffers_(other.buffers_), handler_(other.handler_) { } buffered_write_some_handler(buffered_write_some_handler&& other) : storage_(other.storage_), buffers_(other.buffers_), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t) { if (ec) { const std::size_t length = 0; handler_(ec, length); } else { using asio::buffer_size; std::size_t orig_size = storage_.size(); std::size_t space_avail = storage_.capacity() - orig_size; std::size_t bytes_avail = buffer_size(buffers_); std::size_t length = bytes_avail < space_avail ? bytes_avail : space_avail; storage_.resize(orig_size + length); const std::size_t bytes_copied = asio::buffer_copy( storage_.data() + orig_size, buffers_, length); handler_(ec, bytes_copied); } } //private: detail::buffered_stream_storage& storage_; ConstBufferSequence buffers_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, buffered_write_some_handler< ConstBufferSequence, WriteHandler>* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, buffered_write_some_handler< ConstBufferSequence, WriteHandler>* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( buffered_write_some_handler< ConstBufferSequence, WriteHandler>* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, buffered_write_some_handler< ConstBufferSequence, WriteHandler>* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template 
inline void asio_handler_invoke(const Function& function, buffered_write_some_handler< ConstBufferSequence, WriteHandler>* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_buffered_write_some { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, buffered_stream_storage* storage, Stream* next_layer, const ConstBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; using asio::buffer_size; non_const_lvalue handler2(handler); if (buffer_size(buffers) == 0 || storage->size() < storage->capacity()) { next_layer->async_write_some(ASIO_CONST_BUFFER(0, 0), buffered_write_some_handler::type>( *storage, buffers, handler2.value)); } else { initiate_async_buffered_flush()( buffered_write_some_handler::type>( *storage, buffers, handler2.value), storage, next_layer); } } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::buffered_write_some_handler, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::buffered_write_some_handler< ConstBufferSequence, WriteHandler>& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::buffered_write_some_handler, Executor> { typedef typename associated_executor::type type; static type get( const detail::buffered_write_some_handler< ConstBufferSequence, WriteHandler>& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) buffered_write_stream::async_write_some( const ConstBufferSequence& buffers, 
ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_buffered_write_some(), handler, &storage_, &next_layer_, buffers); } template template std::size_t buffered_write_stream::copy( const ConstBufferSequence& buffers) { using asio::buffer_size; std::size_t orig_size = storage_.size(); std::size_t space_avail = storage_.capacity() - orig_size; std::size_t bytes_avail = buffer_size(buffers); std::size_t length = bytes_avail < space_avail ? bytes_avail : space_avail; storage_.resize(orig_size + length); return asio::buffer_copy( storage_.data() + orig_size, buffers, length); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_BUFFERED_WRITE_STREAM_HPP galera-4-26.4.25/asio/asio/impl/defer.hpp000644 000164 177776 00000005207 15107057155 021145 0ustar00jenkinsnogroup000000 000000 // // impl/defer.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_DEFER_HPP #define ASIO_IMPL_DEFER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/work_dispatcher.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct initiate_defer { template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const { typedef typename decay::type DecayedHandler; typename associated_executor::type ex( (get_associated_executor)(handler)); typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.defer(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc); } template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler, ASIO_MOVE_ARG(Executor) ex) const { typedef typename decay::type DecayedHandler; typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.defer(detail::work_dispatcher( ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc); } }; } // namespace detail template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( ASIO_MOVE_ARG(CompletionToken) token) { return async_initiate( detail::initiate_defer(), token); } template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return async_initiate( detail::initiate_defer(), token, ex); } template inline ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return (defer)(ctx.get_executor(), ASIO_MOVE_CAST(CompletionToken)(token)); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_DEFER_HPP galera-4-26.4.25/asio/asio/impl/read.hpp000644 000164 177776 00000115305 
15107057155 020774 0ustar00jenkinsnogroup000000 000000 // // impl/read.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_READ_HPP #define ASIO_IMPL_READ_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/base_from_completion_cond.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/consuming_buffers.hpp" #include "asio/detail/dependent_type.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template std::size_t read_buffer_sequence(SyncReadStream& s, const MutableBufferSequence& buffers, const MutableBufferIterator&, CompletionCondition completion_condition, asio::error_code& ec) { ec = asio::error_code(); asio::detail::consuming_buffers tmp(buffers); while (!tmp.empty()) { if (std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, tmp.total_consumed()))) tmp.consume(s.read_some(tmp.prepare(max_size), ec)); else break; } return tmp.total_consumed();; } } // namespace detail template std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_mutable_buffer_sequence::value >::type*) { 
return detail::read_buffer_sequence(s, buffers, asio::buffer_sequence_begin(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, typename enable_if< is_mutable_buffer_sequence::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, buffers, transfer_all(), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } template inline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, asio::error_code& ec, typename enable_if< is_mutable_buffer_sequence::value >::type*) { return read(s, buffers, transfer_all(), ec); } template inline std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, typename enable_if< is_mutable_buffer_sequence::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) template std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { typename decay::type b( ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers)); ec = asio::error_code(); std::size_t total_transferred = 0; std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, total_transferred)); std::size_t bytes_available = std::min( std::max(512, b.capacity() - b.size()), std::min(max_size, b.max_size() - b.size())); while (bytes_available > 0) { std::size_t bytes_transferred = s.read_some(b.prepare(bytes_available), ec); b.commit(bytes_transferred); total_transferred += bytes_transferred; max_size = detail::adapt_completion_condition_result( 
completion_condition(ec, total_transferred)); bytes_available = std::min( std::max(512, b.capacity() - b.size()), std::min(max_size, b.max_size() - b.size())); } return total_transferred; } template inline std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } template inline std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return read(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ec); } template inline std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template inline std::size_t read(SyncReadStream& s, asio::basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec) { return read(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t read(SyncReadStream& s, asio::basic_streambuf& b) { return read(s, basic_streambuf_ref(b)); } template inline std::size_t read(SyncReadStream& s, asio::basic_streambuf& b, asio::error_code& ec) { return read(s, basic_streambuf_ref(b), ec); } template inline std::size_t read(SyncReadStream& s, 
asio::basic_streambuf& b, CompletionCondition completion_condition) { return read(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) template std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { DynamicBuffer_v2& b = buffers; ec = asio::error_code(); std::size_t total_transferred = 0; std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, total_transferred)); std::size_t bytes_available = std::min( std::max(512, b.capacity() - b.size()), std::min(max_size, b.max_size() - b.size())); while (bytes_available > 0) { std::size_t pos = b.size(); b.grow(bytes_available); std::size_t bytes_transferred = s.read_some( b.data(pos, bytes_available), ec); b.shrink(bytes_available - bytes_transferred); total_transferred += bytes_transferred; max_size = detail::adapt_completion_condition_result( completion_condition(ec, total_transferred)); bytes_available = std::min( std::max(512, b.capacity() - b.size()), std::min(max_size, b.max_size() - b.size())); } return total_transferred; } template inline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } template inline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type*) { return read(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ec); } template inline std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition 
completion_condition, typename enable_if< is_dynamic_buffer_v2::value >::type*) { asio::error_code ec; std::size_t bytes_transferred = read(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "read"); return bytes_transferred; } namespace detail { template class read_op : detail::base_from_completion_cond { public: read_op(AsyncReadStream& stream, const MutableBufferSequence& buffers, CompletionCondition& completion_condition, ReadHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), stream_(stream), buffers_(buffers), start_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_op(const read_op& other) : detail::base_from_completion_cond(other), stream_(other.stream_), buffers_(other.buffers_), start_(other.start_), handler_(other.handler_) { } read_op(read_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), stream_(other.stream_), buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)), start_(other.start_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, buffers_.total_consumed()); do { stream_.async_read_some(buffers_.prepare(max_size), ASIO_MOVE_CAST(read_op)(*this)); return; default: buffers_.consume(bytes_transferred); if ((!ec && bytes_transferred == 0) || buffers_.empty()) break; max_size = this->check_for_completion(ec, buffers_.total_consumed()); } while (max_size > 0); handler_(ec, buffers_.total_consumed()); } } //private: typedef asio::detail::consuming_buffers buffers_type; AsyncReadStream& stream_; buffers_type buffers_; int start_; ReadHandler handler_; }; template inline void* 
asio_handler_allocate(std::size_t size, read_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_op* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void start_read_buffer_sequence_op(AsyncReadStream& stream, const MutableBufferSequence& buffers, const MutableBufferIterator&, CompletionCondition& completion_condition, ReadHandler& handler) { detail::read_op( stream, buffers, completion_condition, handler)( asio::error_code(), 0, 1); } struct initiate_async_read_buffer_sequence { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); start_read_buffer_sequence_op(*s, buffers, asio::buffer_sequence_begin(buffers), completion_cond2.value, handler2.value); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_mutable_buffer_sequence::value >::type*) { return async_initiate( detail::initiate_async_read_buffer_sequence(), handler, &s, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_mutable_buffer_sequence::value >::type*) { return async_initiate( detail::initiate_async_read_buffer_sequence(), handler, &s, buffers, transfer_all()); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class read_dynbuf_v1_op : detail::base_from_completion_cond { public: template read_dynbuf_v1_op(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, CompletionCondition& 
completion_condition, ReadHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), start_(0), total_transferred_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_dynbuf_v1_op(const read_dynbuf_v1_op& other) : detail::base_from_completion_cond(other), stream_(other.stream_), buffers_(other.buffers_), start_(other.start_), total_transferred_(other.total_transferred_), handler_(other.handler_) { } read_dynbuf_v1_op(read_dynbuf_v1_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v1)(other.buffers_)), start_(other.start_), total_transferred_(other.total_transferred_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size, bytes_available; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, total_transferred_); bytes_available = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(max_size, buffers_.max_size() - buffers_.size())); for (;;) { stream_.async_read_some(buffers_.prepare(bytes_available), ASIO_MOVE_CAST(read_dynbuf_v1_op)(*this)); return; default: total_transferred_ += bytes_transferred; buffers_.commit(bytes_transferred); max_size = this->check_for_completion(ec, total_transferred_); bytes_available = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(max_size, buffers_.max_size() - buffers_.size())); if ((!ec && bytes_transferred == 0) || bytes_available == 0) break; } handler_(ec, static_cast(total_transferred_)); } } //private: AsyncReadStream& stream_; DynamicBuffer_v1 buffers_; int start_; std::size_t total_transferred_; ReadHandler handler_; }; template 
inline void* asio_handler_allocate(std::size_t size, read_dynbuf_v1_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_dynbuf_v1_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_dynbuf_v1_op* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_dynbuf_v1_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_dynbuf_v1_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_dynbuf_v1 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); read_dynbuf_v1_op::type, CompletionCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), completion_cond2.value, handler2.value)( asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_dynbuf_v1_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_dynbuf_v1_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_dynbuf_v1_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_dynbuf_v1_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { return async_read(s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), transfer_all(), ASIO_MOVE_CAST(ReadHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type*) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return async_initiate( detail::initiate_async_read_dynbuf_v1(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v1)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler) { return async_read(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(ReadHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler) { return async_read(s, basic_streambuf_ref(b), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ASIO_MOVE_CAST(ReadHandler)(handler)); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) namespace detail { template class read_dynbuf_v2_op : detail::base_from_completion_cond { public: template read_dynbuf_v2_op(AsyncReadStream& stream, ASIO_MOVE_ARG(BufferSequence) buffers, CompletionCondition& completion_condition, ReadHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), stream_(stream), buffers_(ASIO_MOVE_CAST(BufferSequence)(buffers)), start_(0), total_transferred_(0), bytes_available_(0), handler_(ASIO_MOVE_CAST(ReadHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) read_dynbuf_v2_op(const read_dynbuf_v2_op& other) : detail::base_from_completion_cond(other), stream_(other.stream_), buffers_(other.buffers_), start_(other.start_), total_transferred_(other.total_transferred_), bytes_available_(other.bytes_available_), handler_(other.handler_) { } read_dynbuf_v2_op(read_dynbuf_v2_op&& other) : detail::base_from_completion_cond( 
ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), stream_(other.stream_), buffers_(ASIO_MOVE_CAST(DynamicBuffer_v2)(other.buffers_)), start_(other.start_), total_transferred_(other.total_transferred_), bytes_available_(other.bytes_available_), handler_(ASIO_MOVE_CAST(ReadHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size, pos; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, total_transferred_); bytes_available_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(max_size, buffers_.max_size() - buffers_.size())); for (;;) { pos = buffers_.size(); buffers_.grow(bytes_available_); stream_.async_read_some(buffers_.data(pos, bytes_available_), ASIO_MOVE_CAST(read_dynbuf_v2_op)(*this)); return; default: total_transferred_ += bytes_transferred; buffers_.shrink(bytes_available_ - bytes_transferred); max_size = this->check_for_completion(ec, total_transferred_); bytes_available_ = std::min( std::max(512, buffers_.capacity() - buffers_.size()), std::min(max_size, buffers_.max_size() - buffers_.size())); if ((!ec && bytes_transferred == 0) || bytes_available_ == 0) break; } handler_(ec, static_cast(total_transferred_)); } } //private: AsyncReadStream& stream_; DynamicBuffer_v2 buffers_; int start_; std::size_t total_transferred_; std::size_t bytes_available_; ReadHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, read_dynbuf_v2_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, read_dynbuf_v2_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( read_dynbuf_v2_op* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, read_dynbuf_v2_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, read_dynbuf_v2_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_read_dynbuf_v2 { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, AsyncReadStream* s, ASIO_MOVE_ARG(DynamicBuffer_v2) buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); read_dynbuf_v2_op::type, CompletionCondition, typename decay::type>( *s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), completion_cond2.value, handler2.value)( asio::error_code(), 0, 1); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::read_dynbuf_v2_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::read_dynbuf_v2_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::read_dynbuf_v2_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::read_dynbuf_v2_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< 
is_dynamic_buffer_v2::value >::type*) { return async_read(s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), transfer_all(), ASIO_MOVE_CAST(ReadHandler)(handler)); } template inline ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type*) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; return async_initiate( detail::initiate_async_read_dynbuf_v2(), handler, &s, ASIO_MOVE_CAST(DynamicBuffer_v2)(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_READ_HPP galera-4-26.4.25/asio/asio/impl/execution_context.hpp000644 000164 177776 00000006020 15107057155 023621 0ustar00jenkinsnogroup000000 000000 // // impl/execution_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_EXECUTION_CONTEXT_HPP #define ASIO_IMPL_EXECUTION_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/service_registry.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(GENERATING_DOCUMENTATION) template inline Service& use_service(execution_context& e) { // Check that Service meets the necessary type requirements. 
(void)static_cast(static_cast(0)); return e.service_registry_->template use_service(); } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template Service& make_service(execution_context& e, ASIO_MOVE_ARG(Args)... args) { detail::scoped_ptr svc( new Service(e, ASIO_MOVE_CAST(Args)(args)...)); e.service_registry_->template add_service(svc.get()); Service& result = *svc; svc.release(); return result; } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template Service& make_service(execution_context& e) { detail::scoped_ptr svc(new Service(e)); e.service_registry_->template add_service(svc.get()); Service& result = *svc; svc.release(); return result; } #define ASIO_PRIVATE_MAKE_SERVICE_DEF(n) \ template \ Service& make_service(execution_context& e, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ detail::scoped_ptr svc( \ new Service(e, ASIO_VARIADIC_MOVE_ARGS(n))); \ e.service_registry_->template add_service(svc.get()); \ Service& result = *svc; \ svc.release(); \ return result; \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_MAKE_SERVICE_DEF) #undef ASIO_PRIVATE_MAKE_SERVICE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) template inline void add_service(execution_context& e, Service* svc) { // Check that Service meets the necessary type requirements. (void)static_cast(static_cast(0)); e.service_registry_->template add_service(svc); } template inline bool has_service(execution_context& e) { // Check that Service meets the necessary type requirements. (void)static_cast(static_cast(0)); return e.service_registry_->template has_service(); } #endif // !defined(GENERATING_DOCUMENTATION) inline execution_context& execution_context::service::context() { return owner_; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_EXECUTION_CONTEXT_HPP galera-4-26.4.25/asio/asio/impl/error_code.ipp000644 000164 177776 00000014235 15107057155 022205 0ustar00jenkinsnogroup000000 000000 // // impl/error_code.ipp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_ERROR_CODE_IPP #define ASIO_IMPL_ERROR_CODE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include #elif defined(ASIO_WINDOWS_RUNTIME) # include #else # include # include # include #endif #include "asio/detail/local_free_on_block_exit.hpp" #include "asio/detail/socket_types.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class system_category : public error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.system"; } std::string message(int value) const { #if defined(ASIO_WINDOWS_RUNTIME) || defined(ASIO_WINDOWS_APP) std::wstring wmsg(128, wchar_t()); for (;;) { DWORD wlength = ::FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, value, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), &wmsg[0], static_cast(wmsg.size()), 0); if (wlength == 0 && ::GetLastError() == ERROR_INSUFFICIENT_BUFFER) { wmsg.resize(wmsg.size() + wmsg.size() / 2); continue; } if (wlength && wmsg[wlength - 1] == '\n') --wlength; if (wlength && wmsg[wlength - 1] == '\r') --wlength; if (wlength) { std::string msg(wlength * 2, char()); int length = ::WideCharToMultiByte(CP_ACP, 0, wmsg.c_str(), static_cast(wlength), &msg[0], static_cast(wlength * 2), 0, 0); if (length <= 0) return "asio.system error"; msg.resize(static_cast(length)); return msg; } else return "asio.system error"; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) char* msg = 0; DWORD length = ::FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, value, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), 
(char*)&msg, 0, 0); detail::local_free_on_block_exit local_free_obj(msg); if (length && msg[length - 1] == '\n') msg[--length] = '\0'; if (length && msg[length - 1] == '\r') msg[--length] = '\0'; if (length) return msg; else return "asio.system error"; #else // defined(ASIO_WINDOWS_DESKTOP) || defined(__CYGWIN__) #if !defined(__sun) if (value == ECANCELED) return "Operation aborted."; #endif // !defined(__sun) #if defined(__sun) || defined(__QNX__) || defined(__SYMBIAN32__) using namespace std; return strerror(value); #else char buf[256] = ""; using namespace std; return strerror_result(strerror_r(value, buf, sizeof(buf)), buf); #endif #endif // defined(ASIO_WINDOWS_DESKTOP) || defined(__CYGWIN__) } #if defined(ASIO_HAS_STD_ERROR_CODE) std::error_condition default_error_condition( int ev) const ASIO_ERROR_CATEGORY_NOEXCEPT { switch (ev) { case access_denied: return std::errc::permission_denied; case address_family_not_supported: return std::errc::address_family_not_supported; case address_in_use: return std::errc::address_in_use; case already_connected: return std::errc::already_connected; case already_started: return std::errc::connection_already_in_progress; case broken_pipe: return std::errc::broken_pipe; case connection_aborted: return std::errc::connection_aborted; case connection_refused: return std::errc::connection_refused; case connection_reset: return std::errc::connection_reset; case bad_descriptor: return std::errc::bad_file_descriptor; case fault: return std::errc::bad_address; case host_unreachable: return std::errc::host_unreachable; case in_progress: return std::errc::operation_in_progress; case interrupted: return std::errc::interrupted; case invalid_argument: return std::errc::invalid_argument; case message_size: return std::errc::message_size; case name_too_long: return std::errc::filename_too_long; case network_down: return std::errc::network_down; case network_reset: return std::errc::network_reset; case network_unreachable: return 
std::errc::network_unreachable; case no_descriptors: return std::errc::too_many_files_open; case no_buffer_space: return std::errc::no_buffer_space; case no_memory: return std::errc::not_enough_memory; case no_permission: return std::errc::operation_not_permitted; case no_protocol_option: return std::errc::no_protocol_option; case no_such_device: return std::errc::no_such_device; case not_connected: return std::errc::not_connected; case not_socket: return std::errc::not_a_socket; case operation_aborted: return std::errc::operation_canceled; case operation_not_supported: return std::errc::operation_not_supported; case shut_down: return std::make_error_condition(ev, *this); case timed_out: return std::errc::timed_out; case try_again: return std::errc::resource_unavailable_try_again; case would_block: return std::errc::operation_would_block; default: return std::make_error_condition(ev, *this); } #endif // defined(ASIO_HAS_STD_ERROR_CODE) private: // Helper function to adapt the result from glibc's variant of strerror_r. static const char* strerror_result(int, const char* s) { return s; } static const char* strerror_result(const char* s, const char*) { return s; } }; } // namespace detail const error_category& system_category() { static detail::system_category instance; return instance; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_ERROR_CODE_IPP galera-4-26.4.25/asio/asio/impl/src.cpp000644 000164 177776 00000001266 15107057155 020643 0ustar00jenkinsnogroup000000 000000 // // impl/src.cpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #if defined(_MSC_VER) \ || defined(__BORLANDC__) \ || defined(__DMC__) # pragma message ( \ "This file is deprecated. 
" \ "Please #include instead.") #elif defined(__GNUC__) \ || defined(__HP_aCC) \ || defined(__SUNPRO_CC) \ || defined(__IBMCPP__) # warning "This file is deprecated." # warning "Please #include instead." #endif #include "asio/impl/src.hpp" galera-4-26.4.25/asio/asio/impl/post.hpp000644 000164 177776 00000005171 15107057155 021045 0ustar00jenkinsnogroup000000 000000 // // impl/post.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_POST_HPP #define ASIO_IMPL_POST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/work_dispatcher.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct initiate_post { template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const { typedef typename decay::type DecayedHandler; typename associated_executor::type ex( (get_associated_executor)(handler)); typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.post(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc); } template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler, ASIO_MOVE_ARG(Executor) ex) const { typedef typename decay::type DecayedHandler; typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.post(detail::work_dispatcher( ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc); } }; } // namespace detail template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( ASIO_MOVE_ARG(CompletionToken) token) { return async_initiate( detail::initiate_post(), token); } template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( const Executor& ex, 
ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return async_initiate( detail::initiate_post(), token, ex); } template inline ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return (post)(ctx.get_executor(), ASIO_MOVE_CAST(CompletionToken)(token)); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_POST_HPP galera-4-26.4.25/asio/asio/impl/compose.hpp000644 000164 177776 00000030631 15107057155 021524 0ustar00jenkinsnogroup000000 000000 // // impl/compose.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_COMPOSE_HPP #define ASIO_IMPL_COMPOSE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/executor_work_guard.hpp" #include "asio/is_executor.hpp" #include "asio/system_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct composed_work; template <> struct composed_work { composed_work() ASIO_NOEXCEPT : head_(system_executor()) { } void reset() { head_.reset(); } typedef system_executor head_type; executor_work_guard head_; }; inline composed_work make_composed_work() { return composed_work(); } template struct composed_work { explicit composed_work(const Head& ex) ASIO_NOEXCEPT : head_(ex) { } void reset() { head_.reset(); } typedef Head head_type; executor_work_guard head_; }; template 
inline composed_work make_composed_work(const Head& head) { return composed_work(head); } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template struct composed_work { explicit composed_work(const Head& head, const Tail&... tail) ASIO_NOEXCEPT : head_(head), tail_(tail...) { } void reset() { head_.reset(); tail_.reset(); } typedef Head head_type; executor_work_guard head_; composed_work tail_; }; template inline composed_work make_composed_work(const Head& head, const Tail&... tail) { return composed_work(head, tail...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) #define ASIO_PRIVATE_COMPOSED_WORK_DEF(n) \ template \ struct composed_work \ { \ explicit composed_work(const Head& head, \ ASIO_VARIADIC_CONSTREF_PARAMS(n)) ASIO_NOEXCEPT \ : head_(head), \ tail_(ASIO_VARIADIC_BYVAL_ARGS(n)) \ { \ } \ \ void reset() \ { \ head_.reset(); \ tail_.reset(); \ } \ \ typedef Head head_type; \ executor_work_guard head_; \ composed_work tail_; \ }; \ \ template \ inline composed_work \ make_composed_work(const Head& head, ASIO_VARIADIC_CONSTREF_PARAMS(n)) \ { \ return composed_work< \ void(Head, ASIO_VARIADIC_TARGS(n))>( \ head, ASIO_VARIADIC_BYVAL_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_COMPOSED_WORK_DEF) #undef ASIO_PRIVATE_COMPOSED_WORK_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template class composed_op; template class composed_op #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template class composed_op #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) { public: composed_op(ASIO_MOVE_ARG(Impl) impl, ASIO_MOVE_ARG(Work) work, ASIO_MOVE_ARG(Handler) handler) : impl_(ASIO_MOVE_CAST(Impl)(impl)), work_(ASIO_MOVE_CAST(Work)(work)), handler_(ASIO_MOVE_CAST(Handler)(handler)), invocations_(0) { } #if defined(ASIO_HAS_MOVE) composed_op(composed_op&& other) : impl_(ASIO_MOVE_CAST(Impl)(other.impl_)), work_(ASIO_MOVE_CAST(Work)(other.work_)), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), 
invocations_(other.invocations_) { } #endif // defined(ASIO_HAS_MOVE) typedef typename associated_executor::type executor_type; executor_type get_executor() const ASIO_NOEXCEPT { return (get_associated_executor)(handler_, work_.head_.get_executor()); } typedef typename associated_allocator >::type allocator_type; allocator_type get_allocator() const ASIO_NOEXCEPT { return (get_associated_allocator)(handler_, std::allocator()); } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()(ASIO_MOVE_ARG(T)... t) { if (invocations_ < ~unsigned(0)) ++invocations_; impl_(*this, ASIO_MOVE_CAST(T)(t)...); } void complete(Args... args) { this->work_.reset(); this->handler_(ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) void operator()() { if (invocations_ < ~unsigned(0)) ++invocations_; impl_(*this); } void complete() { this->work_.reset(); this->handler_(); } #define ASIO_PRIVATE_COMPOSED_OP_DEF(n) \ template \ void operator()(ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ if (invocations_ < ~unsigned(0)) \ ++invocations_; \ impl_(*this, ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template \ void complete(ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ this->work_.reset(); \ this->handler_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_COMPOSED_OP_DEF) #undef ASIO_PRIVATE_COMPOSED_OP_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) //private: Impl impl_; Work work_; Handler handler_; unsigned invocations_; }; template inline void* asio_handler_allocate(std::size_t size, composed_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, composed_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( composed_op* this_handler) { return this_handler->invocations_ > 1 ? 
true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, composed_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, composed_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template struct initiate_composed_op { template void operator()(ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Impl) impl, ASIO_MOVE_ARG(Work) work) const { composed_op::type, typename decay::type, typename decay::type, Signature>( ASIO_MOVE_CAST(Impl)(impl), ASIO_MOVE_CAST(Work)(work), ASIO_MOVE_CAST(Handler)(handler))(); } }; template inline typename IoObject::executor_type get_composed_io_executor(IoObject& io_object) { return io_object.get_executor(); } template inline const Executor& get_composed_io_executor(const Executor& ex, typename enable_if::value>::type* = 0) { return ex; } } // namespace detail #if !defined(GENERATING_DOCUMENTATION) #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) async_compose(ASIO_MOVE_ARG(Implementation) implementation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, ASIO_MOVE_ARG(IoObjectsOrExecutors)... 
io_objects_or_executors) { return async_initiate( detail::initiate_composed_op(), token, ASIO_MOVE_CAST(Implementation)(implementation), detail::make_composed_work( detail::get_composed_io_executor( ASIO_MOVE_CAST(IoObjectsOrExecutors)( io_objects_or_executors))...)); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) async_compose(ASIO_MOVE_ARG(Implementation) implementation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token) { return async_initiate( detail::initiate_composed_op(), token, ASIO_MOVE_CAST(Implementation)(implementation), detail::make_composed_work()); } # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n) \ ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_##n # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)) # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)) # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3)) # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T4)(x4)) # define ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T4)(x4)), \ detail::get_composed_io_executor(ASIO_MOVE_CAST(T5)(x5)) #define ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \ template \ ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) \ 
async_compose(ASIO_MOVE_ARG(Implementation) implementation, \ ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return async_initiate( \ detail::initiate_composed_op(), token, \ ASIO_MOVE_CAST(Implementation)(implementation), \ detail::make_composed_work( \ ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n))); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_COMPOSE_DEF) #undef ASIO_PRIVATE_ASYNC_COMPOSE_DEF #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 #undef ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_COMPOSE_HPP galera-4-26.4.25/asio/asio/impl/execution_context.ipp000644 000164 177776 00000003244 15107057155 023627 0ustar00jenkinsnogroup000000 000000 // // impl/execution_context.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_EXECUTION_CONTEXT_IPP #define ASIO_IMPL_EXECUTION_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/execution_context.hpp" #include "asio/detail/service_registry.hpp" #include "asio/detail/push_options.hpp" namespace asio { execution_context::execution_context() : service_registry_(new asio::detail::service_registry(*this)) { } execution_context::~execution_context() { shutdown(); destroy(); delete service_registry_; } void execution_context::shutdown() { service_registry_->shutdown_services(); } void execution_context::destroy() { service_registry_->destroy_services(); } void execution_context::notify_fork( asio::execution_context::fork_event event) { service_registry_->notify_fork(event); } execution_context::service::service(execution_context& owner) : owner_(owner), next_(0) { } execution_context::service::~service() { } void execution_context::service::notify_fork(execution_context::fork_event) { } service_already_exists::service_already_exists() : std::logic_error("Service already exists.") { } invalid_service_owner::invalid_service_owner() : std::logic_error("Invalid service owner.") { } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_EXECUTION_CONTEXT_IPP galera-4-26.4.25/asio/asio/impl/system_context.hpp000644 000164 177776 00000001432 15107057155 023144 0ustar00jenkinsnogroup000000 000000 // // impl/system_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SYSTEM_CONTEXT_HPP #define ASIO_IMPL_SYSTEM_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/system_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { inline system_context::executor_type system_context::get_executor() ASIO_NOEXCEPT { return system_executor(); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_SYSTEM_CONTEXT_HPP galera-4-26.4.25/asio/asio/impl/redirect_error.hpp000644 000164 177776 00000024272 15107057155 023075 0ustar00jenkinsnogroup000000 000000 // impl/redirect_error.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_REDIRECT_ERROR_HPP #define ASIO_IMPL_REDIRECT_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_executor.hpp" #include "asio/associated_allocator.hpp" #include "asio/async_result.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Class to adapt a redirect_error_t as a completion handler. 
template class redirect_error_handler { public: typedef void result_type; template redirect_error_handler(redirect_error_t e) : ec_(e.ec_), handler_(ASIO_MOVE_CAST(CompletionToken)(e.token_)) { } template redirect_error_handler(asio::error_code& ec, ASIO_MOVE_ARG(RedirectedHandler) h) : ec_(ec), handler_(ASIO_MOVE_CAST(RedirectedHandler)(h)) { } void operator()() { handler_(); } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template typename enable_if< !is_same::type, asio::error_code>::value >::type operator()(ASIO_MOVE_ARG(Arg) arg, ASIO_MOVE_ARG(Args)... args) { handler_(ASIO_MOVE_CAST(Arg)(arg), ASIO_MOVE_CAST(Args)(args)...); } template void operator()(const asio::error_code& ec, ASIO_MOVE_ARG(Args)... args) { ec_ = ec; handler_(ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template typename enable_if< !is_same::type, asio::error_code>::value >::type operator()(ASIO_MOVE_ARG(Arg) arg) { handler_(ASIO_MOVE_CAST(Arg)(arg)); } void operator()(const asio::error_code& ec) { ec_ = ec; handler_(); } #define ASIO_PRIVATE_REDIRECT_ERROR_DEF(n) \ template \ typename enable_if< \ !is_same::type, asio::error_code>::value \ >::type \ operator()(ASIO_MOVE_ARG(Arg) arg, ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ handler_(ASIO_MOVE_CAST(Arg)(arg), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template \ void operator()(const asio::error_code& ec, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ ec_ = ec; \ handler_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_REDIRECT_ERROR_DEF) #undef ASIO_PRIVATE_REDIRECT_ERROR_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) //private: asio::error_code& ec_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, redirect_error_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, redirect_error_handler* this_handler) { 
asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( redirect_error_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, redirect_error_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, redirect_error_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template struct redirect_error_signature { typedef Signature type; }; #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template struct redirect_error_signature { typedef R type(Args...); }; template struct redirect_error_signature { typedef R type(Args...); }; #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template struct redirect_error_signature { typedef R type(); }; template struct redirect_error_signature { typedef R type(); }; #define ASIO_PRIVATE_REDIRECT_ERROR_DEF(n) \ template \ struct redirect_error_signature< \ R(asio::error_code, ASIO_VARIADIC_TARGS(n))> \ { \ typedef R type(ASIO_VARIADIC_TARGS(n)); \ }; \ \ template \ struct redirect_error_signature< \ R(const asio::error_code&, ASIO_VARIADIC_TARGS(n))> \ { \ typedef R type(ASIO_VARIADIC_TARGS(n)); \ }; \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_REDIRECT_ERROR_DEF) #undef ASIO_PRIVATE_REDIRECT_ERROR_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct async_result, Signature> { typedef typename async_result::type> ::return_type return_type; template struct init_wrapper { template init_wrapper(asio::error_code& ec, ASIO_MOVE_ARG(Init) init) : ec_(ec), initiation_(ASIO_MOVE_CAST(Init)(init)) { } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()( ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Args)... 
args) { ASIO_MOVE_CAST(Initiation)(initiation_)( detail::redirect_error_handler< typename decay::type>( ec_, ASIO_MOVE_CAST(Handler)(handler)), ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template void operator()( ASIO_MOVE_ARG(Handler) handler) { ASIO_MOVE_CAST(Initiation)(initiation_)( detail::redirect_error_handler< typename decay::type>( ec_, ASIO_MOVE_CAST(Handler)(handler))); } #define ASIO_PRIVATE_INIT_WRAPPER_DEF(n) \ template \ void operator()( \ ASIO_MOVE_ARG(Handler) handler, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ ASIO_MOVE_CAST(Initiation)(initiation_)( \ detail::redirect_error_handler< \ typename decay::type>( \ ec_, ASIO_MOVE_CAST(Handler)(handler)), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INIT_WRAPPER_DEF) #undef ASIO_PRIVATE_INIT_WRAPPER_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) asio::error_code& ec_; Initiation initiation_; }; #if defined(ASIO_HAS_VARIADIC_TEMPLATES) template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken) token, ASIO_MOVE_ARG(Args)... 
args) { return async_initiate::type>( init_wrapper::type>( token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)), token.token_, ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken) token) { return async_initiate::type>( init_wrapper::type>( token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)), token.token_); } #define ASIO_PRIVATE_INITIATE_DEF(n) \ template \ static return_type initiate( \ ASIO_MOVE_ARG(Initiation) initiation, \ ASIO_MOVE_ARG(RawCompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return async_initiate::type>( \ init_wrapper::type>( \ token.ec_, ASIO_MOVE_CAST(Initiation)(initiation)), \ token.token_, ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF) #undef ASIO_PRIVATE_INITIATE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get( const detail::redirect_error_handler& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::redirect_error_handler& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_REDIRECT_ERROR_HPP galera-4-26.4.25/asio/asio/impl/dispatch.hpp000644 000164 177776 00000005261 15107057155 021657 0ustar00jenkinsnogroup000000 000000 // // impl/dispatch.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_DISPATCH_HPP #define ASIO_IMPL_DISPATCH_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/work_dispatcher.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct initiate_dispatch { template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler) const { typedef typename decay::type DecayedHandler; typename associated_executor::type ex( (get_associated_executor)(handler)); typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.dispatch(ASIO_MOVE_CAST(CompletionHandler)(handler), alloc); } template void operator()(ASIO_MOVE_ARG(CompletionHandler) handler, ASIO_MOVE_ARG(Executor) ex) const { typedef typename decay::type DecayedHandler; typename associated_allocator::type alloc( (get_associated_allocator)(handler)); ex.dispatch(detail::work_dispatcher( ASIO_MOVE_CAST(CompletionHandler)(handler)), alloc); } }; } // namespace detail template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( ASIO_MOVE_ARG(CompletionToken) token) { return async_initiate( detail::initiate_dispatch(), token); } template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return async_initiate( detail::initiate_dispatch(), token, ex); } template inline ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type*) { return (dispatch)(ctx.get_executor(), ASIO_MOVE_CAST(CompletionToken)(token)); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_DISPATCH_HPP 
galera-4-26.4.25/asio/asio/impl/write_at.hpp000644 000164 177776 00000050553 15107057155 021702 0ustar00jenkinsnogroup000000 000000 // // impl/write_at.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_WRITE_AT_HPP #define ASIO_IMPL_WRITE_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/base_from_completion_cond.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/consuming_buffers.hpp" #include "asio/detail/dependent_type.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template std::size_t write_at_buffer_sequence(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, const ConstBufferIterator&, CompletionCondition completion_condition, asio::error_code& ec) { ec = asio::error_code(); asio::detail::consuming_buffers tmp(buffers); while (!tmp.empty()) { if (std::size_t max_size = detail::adapt_completion_condition_result( completion_condition(ec, tmp.total_consumed()))) { tmp.consume(d.write_some_at(offset + tmp.total_consumed(), tmp.prepare(max_size), ec)); } else break; } return tmp.total_consumed();; } } // namespace detail template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const 
ConstBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec) { return detail::write_at_buffer_sequence(d, offset, buffers, asio::buffer_sequence_begin(buffers), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t bytes_transferred = write_at( d, offset, buffers, transfer_all(), ec); asio::detail::throw_error(ec, "write_at"); return bytes_transferred; } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { return write_at(d, offset, buffers, transfer_all(), ec); } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition completion_condition) { asio::error_code ec; std::size_t bytes_transferred = write_at(d, offset, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "write_at"); return bytes_transferred; } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec) { std::size_t bytes_transferred = write_at(d, offset, b.data(), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); b.consume(bytes_transferred); return bytes_transferred; } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b) { asio::error_code ec; std::size_t bytes_transferred = write_at(d, offset, b, transfer_all(), ec); asio::detail::throw_error(ec, "write_at"); return bytes_transferred; } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b, asio::error_code& ec) { return 
write_at(d, offset, b, transfer_all(), ec); } template inline std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition) { asio::error_code ec; std::size_t bytes_transferred = write_at(d, offset, b, ASIO_MOVE_CAST(CompletionCondition)(completion_condition), ec); asio::detail::throw_error(ec, "write_at"); return bytes_transferred; } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) namespace detail { template class write_at_op : detail::base_from_completion_cond { public: write_at_op(AsyncRandomAccessWriteDevice& device, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition& completion_condition, WriteHandler& handler) : detail::base_from_completion_cond< CompletionCondition>(completion_condition), device_(device), offset_(offset), buffers_(buffers), start_(0), handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) write_at_op(const write_at_op& other) : detail::base_from_completion_cond(other), device_(other.device_), offset_(other.offset_), buffers_(other.buffers_), start_(other.start_), handler_(other.handler_) { } write_at_op(write_at_op&& other) : detail::base_from_completion_cond( ASIO_MOVE_CAST(detail::base_from_completion_cond< CompletionCondition>)(other)), device_(other.device_), offset_(other.offset_), buffers_(ASIO_MOVE_CAST(buffers_type)(other.buffers_)), start_(other.start_), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, std::size_t bytes_transferred, int start = 0) { std::size_t max_size; switch (start_ = start) { case 1: max_size = this->check_for_completion(ec, buffers_.total_consumed()); do { device_.async_write_some_at( offset_ + buffers_.total_consumed(), buffers_.prepare(max_size), ASIO_MOVE_CAST(write_at_op)(*this)); return; default: buffers_.consume(bytes_transferred); if ((!ec && bytes_transferred == 0) || 
buffers_.empty()) break; max_size = this->check_for_completion(ec, buffers_.total_consumed()); } while (max_size > 0); handler_(ec, buffers_.total_consumed()); } } //private: typedef asio::detail::consuming_buffers buffers_type; AsyncRandomAccessWriteDevice& device_; uint64_t offset_; buffers_type buffers_; int start_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, write_at_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, write_at_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( write_at_op* this_handler) { return this_handler->start_ == 0 ? true : asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, write_at_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, write_at_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void start_write_at_buffer_sequence_op(AsyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, const ConstBufferIterator&, CompletionCondition& completion_condition, WriteHandler& handler) { detail::write_at_op( d, offset, buffers, completion_condition, handler)( asio::error_code(), 0, 1); } struct initiate_async_write_at_buffer_sequence { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, AsyncRandomAccessWriteDevice* d, uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(CompletionCondition) completion_cond) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); non_const_lvalue completion_cond2(completion_cond); start_write_at_buffer_sequence_op(*d, offset, buffers, asio::buffer_sequence_begin(buffers), completion_cond2.value, handler2.value); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::write_at_op, Allocator> { typedef typename associated_allocator::type type; static type get( const detail::write_at_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::write_at_op, Executor> { typedef typename associated_executor::type type; static type get( const detail::write_at_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_write_at_buffer_sequence(), handler, &d, offset, buffers, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_write_at_buffer_sequence(), handler, &d, offset, buffers, transfer_all()); } #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) namespace detail { template class write_at_streambuf_op { public: write_at_streambuf_op( asio::basic_streambuf& streambuf, WriteHandler& handler) : streambuf_(streambuf), 
handler_(ASIO_MOVE_CAST(WriteHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) write_at_streambuf_op(const write_at_streambuf_op& other) : streambuf_(other.streambuf_), handler_(other.handler_) { } write_at_streambuf_op(write_at_streambuf_op&& other) : streambuf_(other.streambuf_), handler_(ASIO_MOVE_CAST(WriteHandler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(const asio::error_code& ec, const std::size_t bytes_transferred) { streambuf_.consume(bytes_transferred); handler_(ec, bytes_transferred); } //private: asio::basic_streambuf& streambuf_; WriteHandler handler_; }; template inline void* asio_handler_allocate(std::size_t size, write_at_streambuf_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, write_at_streambuf_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( write_at_streambuf_op* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, write_at_streambuf_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, write_at_streambuf_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } struct initiate_async_write_at_streambuf { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, AsyncRandomAccessWriteDevice* d, uint64_t offset, basic_streambuf* b, ASIO_MOVE_ARG(CompletionCondition) completion_condition) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; non_const_lvalue handler2(handler); async_write_at(*d, offset, b->data(), ASIO_MOVE_CAST(CompletionCondition)(completion_condition), write_at_streambuf_op::type>( *b, handler2.value)); } }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template struct associated_allocator< detail::write_at_streambuf_op, Allocator1> { typedef typename associated_allocator::type type; static type get( const detail::write_at_streambuf_op& h, const Allocator1& a = Allocator1()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< detail::write_at_streambuf_op, Executor1> { typedef typename associated_executor::type type; static type get( const detail::write_at_streambuf_op& h, const Executor1& ex = Executor1()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // !defined(GENERATING_DOCUMENTATION) template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_write_at_streambuf(), handler, &d, offset, &b, ASIO_MOVE_CAST(CompletionCondition)(completion_condition)); } template inline ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, asio::basic_streambuf& b, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( detail::initiate_async_write_at_streambuf(), handler, &d, offset, &b, transfer_all()); } #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_WRITE_AT_HPP galera-4-26.4.25/asio/asio/impl/executor.hpp000644 000164 177776 00000017634 15107057155 021725 0ustar00jenkinsnogroup000000 000000 // 
// impl/executor.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_EXECUTOR_HPP #define ASIO_IMPL_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/executor_function.hpp" #include "asio/detail/global.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/executor.hpp" #include "asio/system_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(GENERATING_DOCUMENTATION) #if defined(ASIO_HAS_MOVE) // Lightweight, move-only function object wrapper. class executor::function { public: template explicit function(F f, const Alloc& a) { // Allocate and construct an operation to wrap the function. typedef detail::executor_function func_type; typename func_type::ptr p = { detail::addressof(a), func_type::ptr::allocate(a), 0 }; func_ = new (p.v) func_type(ASIO_MOVE_CAST(F)(f), a); p.v = 0; } function(function&& other) ASIO_NOEXCEPT : func_(other.func_) { other.func_ = 0; } ~function() { if (func_) func_->destroy(); } void operator()() { if (func_) { detail::executor_function_base* func = func_; func_ = 0; func->complete(); } } private: detail::executor_function_base* func_; }; #else // defined(ASIO_HAS_MOVE) // Not so lightweight, copyable function object wrapper. class executor::function { public: template explicit function(const F& f, const Alloc&) : impl_(new impl(f)) { } void operator()() { impl_->invoke_(impl_.get()); } private: // Base class for polymorphic function implementations. struct impl_base { void (*invoke_)(impl_base*); }; // Polymorphic function implementation. 
template struct impl : impl_base { impl(const F& f) : function_(f) { invoke_ = &function::invoke; } F function_; }; // Helper to invoke a function. template static void invoke(impl_base* i) { static_cast*>(i)->function_(); } detail::shared_ptr impl_; }; #endif // defined(ASIO_HAS_MOVE) // Default polymorphic allocator implementation. template class executor::impl : public executor::impl_base { public: typedef ASIO_REBIND_ALLOC(Allocator, impl) allocator_type; static impl_base* create(const Executor& e, Allocator a = Allocator()) { raw_mem mem(a); impl* p = new (mem.ptr_) impl(e, a); mem.ptr_ = 0; return p; } impl(const Executor& e, const Allocator& a) ASIO_NOEXCEPT : impl_base(false), ref_count_(1), executor_(e), allocator_(a) { } impl_base* clone() const ASIO_NOEXCEPT { ++ref_count_; return const_cast(static_cast(this)); } void destroy() ASIO_NOEXCEPT { if (--ref_count_ == 0) { allocator_type alloc(allocator_); impl* p = this; p->~impl(); alloc.deallocate(p, 1); } } void on_work_started() ASIO_NOEXCEPT { executor_.on_work_started(); } void on_work_finished() ASIO_NOEXCEPT { executor_.on_work_finished(); } execution_context& context() ASIO_NOEXCEPT { return executor_.context(); } void dispatch(ASIO_MOVE_ARG(function) f) { executor_.dispatch(ASIO_MOVE_CAST(function)(f), allocator_); } void post(ASIO_MOVE_ARG(function) f) { executor_.post(ASIO_MOVE_CAST(function)(f), allocator_); } void defer(ASIO_MOVE_ARG(function) f) { executor_.defer(ASIO_MOVE_CAST(function)(f), allocator_); } type_id_result_type target_type() const ASIO_NOEXCEPT { return type_id(); } void* target() ASIO_NOEXCEPT { return &executor_; } const void* target() const ASIO_NOEXCEPT { return &executor_; } bool equals(const impl_base* e) const ASIO_NOEXCEPT { if (this == e) return true; if (target_type() != e->target_type()) return false; return executor_ == *static_cast(e->target()); } private: mutable detail::atomic_count ref_count_; Executor executor_; Allocator allocator_; struct raw_mem { 
allocator_type allocator_; impl* ptr_; explicit raw_mem(const Allocator& a) : allocator_(a), ptr_(allocator_.allocate(1)) { } ~raw_mem() { if (ptr_) allocator_.deallocate(ptr_, 1); } private: // Disallow copying and assignment. raw_mem(const raw_mem&); raw_mem operator=(const raw_mem&); }; }; // Polymorphic allocator specialisation for system_executor. template class executor::impl : public executor::impl_base { public: static impl_base* create(const system_executor&, const Allocator& = Allocator()) { return &detail::global > >(); } impl() : impl_base(true) { } impl_base* clone() const ASIO_NOEXCEPT { return const_cast(static_cast(this)); } void destroy() ASIO_NOEXCEPT { } void on_work_started() ASIO_NOEXCEPT { executor_.on_work_started(); } void on_work_finished() ASIO_NOEXCEPT { executor_.on_work_finished(); } execution_context& context() ASIO_NOEXCEPT { return executor_.context(); } void dispatch(ASIO_MOVE_ARG(function) f) { executor_.dispatch(ASIO_MOVE_CAST(function)(f), allocator_); } void post(ASIO_MOVE_ARG(function) f) { executor_.post(ASIO_MOVE_CAST(function)(f), allocator_); } void defer(ASIO_MOVE_ARG(function) f) { executor_.defer(ASIO_MOVE_CAST(function)(f), allocator_); } type_id_result_type target_type() const ASIO_NOEXCEPT { return type_id(); } void* target() ASIO_NOEXCEPT { return &executor_; } const void* target() const ASIO_NOEXCEPT { return &executor_; } bool equals(const impl_base* e) const ASIO_NOEXCEPT { return this == e; } private: system_executor executor_; Allocator allocator_; }; template executor::executor(Executor e) : impl_(impl >::create(e)) { } template executor::executor(allocator_arg_t, const Allocator& a, Executor e) : impl_(impl::create(e, a)) { } template void executor::dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { impl_base* i = get_impl(); if (i->fast_dispatch_) system_executor().dispatch(ASIO_MOVE_CAST(Function)(f), a); else i->dispatch(function(ASIO_MOVE_CAST(Function)(f), a)); } template void 
executor::post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { get_impl()->post(function(ASIO_MOVE_CAST(Function)(f), a)); } template void executor::defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { get_impl()->defer(function(ASIO_MOVE_CAST(Function)(f), a)); } template Executor* executor::target() ASIO_NOEXCEPT { return impl_ && impl_->target_type() == type_id() ? static_cast(impl_->target()) : 0; } template const Executor* executor::target() const ASIO_NOEXCEPT { return impl_ && impl_->target_type() == type_id() ? static_cast(impl_->target()) : 0; } #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_EXECUTOR_HPP galera-4-26.4.25/asio/asio/impl/spawn.hpp000644 000164 177776 00000033172 15107057155 021212 0ustar00jenkinsnogroup000000 000000 // // impl/spawn.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_SPAWN_HPP #define ASIO_IMPL_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/async_result.hpp" #include "asio/bind_executor.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class coro_handler { public: coro_handler(basic_yield_context ctx) : coro_(ctx.coro_.lock()), ca_(ctx.ca_), handler_(ctx.handler_), ready_(0), ec_(ctx.ec_), value_(0) { } void operator()(T value) { *ec_ = asio::error_code(); *value_ = ASIO_MOVE_CAST(T)(value); if (--*ready_ == 0) (*coro_)(); } void operator()(asio::error_code ec, T value) { *ec_ = ec; *value_ = ASIO_MOVE_CAST(T)(value); if (--*ready_ == 0) (*coro_)(); } //private: shared_ptr::callee_type> coro_; typename basic_yield_context::caller_type& ca_; Handler handler_; atomic_count* ready_; asio::error_code* ec_; T* value_; }; template class coro_handler { public: coro_handler(basic_yield_context ctx) : coro_(ctx.coro_.lock()), ca_(ctx.ca_), handler_(ctx.handler_), ready_(0), ec_(ctx.ec_) { } void operator()() { *ec_ = asio::error_code(); if (--*ready_ == 0) (*coro_)(); } void operator()(asio::error_code ec) { *ec_ = ec; if (--*ready_ == 0) (*coro_)(); } //private: shared_ptr::callee_type> coro_; typename basic_yield_context::caller_type& ca_; Handler handler_; atomic_count* ready_; asio::error_code* ec_; }; template inline void* asio_handler_allocate(std::size_t 
size, coro_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, coro_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation(coro_handler*) { return true; } template inline void asio_handler_invoke(Function& function, coro_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, coro_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template class coro_async_result { public: typedef coro_handler completion_handler_type; typedef T return_type; explicit coro_async_result(completion_handler_type& h) : handler_(h), ca_(h.ca_), ready_(2) { h.ready_ = &ready_; out_ec_ = h.ec_; if (!out_ec_) h.ec_ = &ec_; h.value_ = &value_; } return_type get() { // Must not hold shared_ptr to coro while suspended. handler_.coro_.reset(); if (--ready_ != 0) ca_(); if (!out_ec_ && ec_) throw asio::system_error(ec_); return ASIO_MOVE_CAST(return_type)(value_); } private: completion_handler_type& handler_; typename basic_yield_context::caller_type& ca_; atomic_count ready_; asio::error_code* out_ec_; asio::error_code ec_; return_type value_; }; template class coro_async_result { public: typedef coro_handler completion_handler_type; typedef void return_type; explicit coro_async_result(completion_handler_type& h) : handler_(h), ca_(h.ca_), ready_(2) { h.ready_ = &ready_; out_ec_ = h.ec_; if (!out_ec_) h.ec_ = &ec_; } void get() { // Must not hold shared_ptr to coro while suspended. 
handler_.coro_.reset(); if (--ready_ != 0) ca_(); if (!out_ec_ && ec_) throw asio::system_error(ec_); } private: completion_handler_type& handler_; typename basic_yield_context::caller_type& ca_; atomic_count ready_; asio::error_code* out_ec_; asio::error_code ec_; }; } // namespace detail #if !defined(GENERATING_DOCUMENTATION) template class async_result, ReturnType()> : public detail::coro_async_result { public: explicit async_result( typename detail::coro_async_result::completion_handler_type& h) : detail::coro_async_result(h) { } }; template class async_result, ReturnType(Arg1)> : public detail::coro_async_result::type> { public: explicit async_result( typename detail::coro_async_result::type>::completion_handler_type& h) : detail::coro_async_result::type>(h) { } }; template class async_result, ReturnType(asio::error_code)> : public detail::coro_async_result { public: explicit async_result( typename detail::coro_async_result::completion_handler_type& h) : detail::coro_async_result(h) { } }; template class async_result, ReturnType(asio::error_code, Arg2)> : public detail::coro_async_result::type> { public: explicit async_result( typename detail::coro_async_result::type>::completion_handler_type& h) : detail::coro_async_result::type>(h) { } }; template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::coro_handler& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get(const detail::coro_handler& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; namespace detail { template struct spawn_data : private noncopyable { template spawn_data(ASIO_MOVE_ARG(Hand) handler, bool call_handler, ASIO_MOVE_ARG(Func) function) : handler_(ASIO_MOVE_CAST(Hand)(handler)), 
call_handler_(call_handler), function_(ASIO_MOVE_CAST(Func)(function)) { } weak_ptr::callee_type> coro_; Handler handler_; bool call_handler_; Function function_; }; template struct coro_entry_point { void operator()(typename basic_yield_context::caller_type& ca) { shared_ptr > data(data_); #if !defined(BOOST_COROUTINES_UNIDIRECT) && !defined(BOOST_COROUTINES_V2) ca(); // Yield until coroutine pointer has been initialised. #endif // !defined(BOOST_COROUTINES_UNIDIRECT) && !defined(BOOST_COROUTINES_V2) const basic_yield_context yield( data->coro_, ca, data->handler_); (data->function_)(yield); if (data->call_handler_) (data->handler_)(); } shared_ptr > data_; }; template struct spawn_helper { void operator()() { typedef typename basic_yield_context::callee_type callee_type; coro_entry_point entry_point = { data_ }; shared_ptr coro(new callee_type(entry_point, attributes_)); data_->coro_ = coro; (*coro)(); } shared_ptr > data_; boost::coroutines::attributes attributes_; }; template inline void asio_handler_invoke(Function& function, spawn_helper* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->data_->handler_); } template inline void asio_handler_invoke(const Function& function, spawn_helper* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->data_->handler_); } inline void default_spawn_handler() {} } // namespace detail template inline void spawn(ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes) { typedef typename decay::type function_type; typename associated_executor::type ex( (get_associated_executor)(function)); asio::spawn(ex, ASIO_MOVE_CAST(Function)(function), attributes); } template void spawn(ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes, typename enable_if::type>::value && !is_convertible::value>::type*) { typedef typename decay::type handler_type; typedef typename decay::type function_type; typename 
associated_executor::type ex( (get_associated_executor)(handler)); typename associated_allocator::type a( (get_associated_allocator)(handler)); detail::spawn_helper helper; helper.data_.reset( new detail::spawn_data( ASIO_MOVE_CAST(Handler)(handler), true, ASIO_MOVE_CAST(Function)(function))); helper.attributes_ = attributes; ex.dispatch(helper, a); } template void spawn(basic_yield_context ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes) { typedef typename decay::type function_type; Handler handler(ctx.handler_); // Explicit copy that might be moved from. typename associated_executor::type ex( (get_associated_executor)(handler)); typename associated_allocator::type a( (get_associated_allocator)(handler)); detail::spawn_helper helper; helper.data_.reset( new detail::spawn_data( ASIO_MOVE_CAST(Handler)(handler), false, ASIO_MOVE_CAST(Function)(function))); helper.attributes_ = attributes; ex.dispatch(helper, a); } template inline void spawn(const Executor& ex, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes, typename enable_if::value>::type*) { asio::spawn(asio::strand(ex), ASIO_MOVE_CAST(Function)(function), attributes); } template inline void spawn(const strand& ex, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes) { asio::spawn(asio::bind_executor( ex, &detail::default_spawn_handler), ASIO_MOVE_CAST(Function)(function), attributes); } template inline void spawn(const asio::io_context::strand& s, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes) { asio::spawn(asio::bind_executor( s, &detail::default_spawn_handler), ASIO_MOVE_CAST(Function)(function), attributes); } template inline void spawn(ExecutionContext& ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes, typename enable_if::value>::type*) { asio::spawn(ctx.get_executor(), ASIO_MOVE_CAST(Function)(function), attributes); } #endif // 
!defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_SPAWN_HPP galera-4-26.4.25/asio/asio/impl/thread_pool.hpp000644 000164 177776 00000007071 15107057155 022361 0ustar00jenkinsnogroup000000 000000 // // impl/thread_pool.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IMPL_THREAD_POOL_HPP #define ASIO_IMPL_THREAD_POOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/executor_op.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/detail/type_traits.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { inline thread_pool::executor_type thread_pool::get_executor() ASIO_NOEXCEPT { return executor_type(*this); } inline thread_pool& thread_pool::executor_type::context() const ASIO_NOEXCEPT { return pool_; } inline void thread_pool::executor_type::on_work_started() const ASIO_NOEXCEPT { pool_.scheduler_.work_started(); } inline void thread_pool::executor_type::on_work_finished() const ASIO_NOEXCEPT { pool_.scheduler_.work_finished(); } template void thread_pool::executor_type::dispatch( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Invoke immediately if we are already inside the thread pool. if (pool_.scheduler_.can_dispatch()) { // Make a local, non-const copy of the function. function_type tmp(ASIO_MOVE_CAST(Function)(f)); detail::fenced_block b(detail::fenced_block::full); asio_handler_invoke_helpers::invoke(tmp, tmp); return; } // Allocate and construct an operation to wrap the function. 
typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((pool_, *p.p, "thread_pool", &this->context(), 0, "dispatch")); pool_.scheduler_.post_immediate_completion(p.p, false); p.v = p.p = 0; } template void thread_pool::executor_type::post( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((pool_, *p.p, "thread_pool", &this->context(), 0, "post")); pool_.scheduler_.post_immediate_completion(p.p, false); p.v = p.p = 0; } template void thread_pool::executor_type::defer( ASIO_MOVE_ARG(Function) f, const Allocator& a) const { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. typedef detail::executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(f), a); ASIO_HANDLER_CREATION((pool_, *p.p, "thread_pool", &this->context(), 0, "defer")); pool_.scheduler_.post_immediate_completion(p.p, true); p.v = p.p = 0; } inline bool thread_pool::executor_type::running_in_this_thread() const ASIO_NOEXCEPT { return pool_.scheduler_.can_dispatch(); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IMPL_THREAD_POOL_HPP galera-4-26.4.25/asio/asio/basic_datagram_socket.hpp000644 000164 177776 00000125160 15107057155 023411 0ustar00jenkinsnogroup000000 000000 // // basic_datagram_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_DATAGRAM_SOCKET_HPP #define ASIO_BASIC_DATAGRAM_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL) #define ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. template class basic_datagram_socket; #endif // !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL) /// Provides datagram-oriented socket functionality. /** * The basic_datagram_socket class template provides asynchronous and blocking * datagram-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_datagram_socket : public basic_socket { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Rebinds the socket type to another executor. template struct rebind_executor { /// The socket type when rebound to the specified executor. typedef basic_datagram_socket other; }; /// The native representation of a socket. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename basic_socket::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_datagram_socket without opening it. /** * This constructor creates a datagram socket without opening it. 
The open() * function must be called before data can be sent or received on the socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_datagram_socket(const executor_type& ex) : basic_socket(ex) { } /// Construct a basic_datagram_socket without opening it. /** * This constructor creates a datagram socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. */ template explicit basic_datagram_socket(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context) { } /// Construct and open a basic_datagram_socket. /** * This constructor creates and opens a datagram socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(const executor_type& ex, const protocol_type& protocol) : basic_socket(ex, protocol) { } /// Construct and open a basic_datagram_socket. /** * This constructor creates and opens a datagram socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. 
*/ template basic_datagram_socket(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol) { } /// Construct a basic_datagram_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a datagram socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the datagram * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(const executor_type& ex, const endpoint_type& endpoint) : basic_socket(ex, endpoint) { } /// Construct a basic_datagram_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a datagram socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the datagram * socket will be bound. * * @throws asio::system_error Thrown on failure. */ template basic_datagram_socket(ExecutionContext& context, const endpoint_type& endpoint, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, endpoint) { } /// Construct a basic_datagram_socket on an existing native socket. /** * This constructor creates a datagram socket object to hold an existing * native socket. 
* * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_datagram_socket(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket(ex, protocol, native_socket) { } /// Construct a basic_datagram_socket on an existing native socket. /** * This constructor creates a datagram socket object to hold an existing * native socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ template basic_datagram_socket(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_socket, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_datagram_socket from another. /** * This constructor moves a datagram socket from one object to another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(const executor_type&) * constructor. */ basic_datagram_socket(basic_datagram_socket&& other) : basic_socket(std::move(other)) { } /// Move-assign a basic_datagram_socket from another. /** * This assignment operator moves a datagram socket from one object to * another. 
* * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(const executor_type&) * constructor. */ basic_datagram_socket& operator=(basic_datagram_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } /// Move-construct a basic_datagram_socket from a socket of another protocol /// type. /** * This constructor moves a datagram socket from one object to another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(const executor_type&) * constructor. */ template basic_datagram_socket(basic_datagram_socket&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : basic_socket(std::move(other)) { } /// Move-assign a basic_datagram_socket from a socket of another protocol /// type. /** * This assignment operator moves a datagram socket from one object to * another. * * @param other The other basic_datagram_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_datagram_socket(const executor_type&) * constructor. */ template typename enable_if< is_convertible::value && is_convertible::value, basic_datagram_socket& >::type operator=(basic_datagram_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the socket. /** * This function destroys the socket, cancelling any outstanding asynchronous * operations associated with the socket as if by calling @c cancel. */ ~basic_datagram_socket() { } /// Send some data on a connected socket. 
/** * This function is used to send data on the datagram socket. The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code socket.send(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the datagram socket. The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the datagram socket. 
The function * call will block until the data has been sent successfully or an error * occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected datagram socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous send on a connected socket. /** * This function is used to asynchronously send data on the datagram socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected datagram * socket. 
* * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous send on a connected socket. /** * This function is used to asynchronously send data on the datagram socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected datagram * socket. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, flags); } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.send_to(asio::buffer(data, size), destination); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination) { asio::error_code ec; std::size_t s = this->impl_.get_service().send_to( this->impl_.get_implementation(), buffers, destination, 0, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. 
*/ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send_to( this->impl_.get_implementation(), buffers, destination, flags, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send a datagram to the specified endpoint. /** * This function is used to send a datagram to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send_to(this->impl_.get_implementation(), buffers, destination, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send a datagram to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. 
* std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_send_to( * asio::buffer(data, size), destination, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send_to(), handler, this, buffers, destination, socket_base::message_flags(0)); } /// Start an asynchronous send. /** * This function is used to asynchronously send a datagram to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. 
The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send_to(), handler, this, buffers, destination, flags); } /// Receive some data on a connected socket. /** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.receive(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. 
/** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the datagram socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected datagram * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the datagram * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * datagram socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the datagram * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * datagram socket. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, flags); } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * asio::ip::udp::endpoint sender_endpoint; * socket.receive_from( * asio::buffer(data, size), sender_endpoint); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, 0, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive a datagram with the endpoint of the sender. /** * This function is used to receive a datagram. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. 
* * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive a datagram. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.async_receive_from( * asio::buffer(data, size), sender_endpoint, handler); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_from(), handler, this, buffers, &sender_endpoint, socket_base::message_flags(0)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive a datagram. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the datagram. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_from(), handler, this, buffers, &sender_endpoint, flags); } private: struct initiate_async_send { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_datagram_socket* self, const ConstBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_send_to { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_datagram_socket* self, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send_to( self->impl_.get_implementation(), buffers, destination, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_datagram_socket* self, const MutableBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive_from { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_datagram_socket* self, const MutableBufferSequence& buffers, endpoint_type* sender_endpoint, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive_from( self->impl_.get_implementation(), buffers, *sender_endpoint, flags, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_DATAGRAM_SOCKET_HPP galera-4-26.4.25/asio/asio/local/000755 000164 177776 00000000000 15107057160 017470 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/local/detail/000755 000164 177776 00000000000 15107057160 020732 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/local/detail/impl/000755 000164 177776 00000000000 15107057160 021673 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/local/detail/impl/endpoint.ipp000644 000164 177776 00000006355 15107057155 024242 0ustar00jenkinsnogroup000000 000000 // // local/detail/impl/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/local/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { namespace detail { endpoint::endpoint() { init("", 0); } endpoint::endpoint(const char* path_name) { using namespace std; // For strlen. 
init(path_name, strlen(path_name)); } endpoint::endpoint(const std::string& path_name) { init(path_name.data(), path_name.length()); } #if defined(ASIO_HAS_STRING_VIEW) endpoint::endpoint(string_view path_name) { init(path_name.data(), path_name.length()); } #endif // defined(ASIO_HAS_STRING_VIEW) void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_un_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } else if (new_size == 0) { path_length_ = 0; } else { path_length_ = new_size - offsetof(asio::detail::sockaddr_un_type, sun_path); // The path returned by the operating system may be NUL-terminated. if (path_length_ > 0 && data_.local.sun_path[path_length_ - 1] == 0) --path_length_; } } std::string endpoint::path() const { return std::string(data_.local.sun_path, path_length_); } void endpoint::path(const char* p) { using namespace std; // For strlen. init(p, strlen(p)); } void endpoint::path(const std::string& p) { init(p.data(), p.length()); } bool operator==(const endpoint& e1, const endpoint& e2) { return e1.path() == e2.path(); } bool operator<(const endpoint& e1, const endpoint& e2) { return e1.path() < e2.path(); } void endpoint::init(const char* path_name, std::size_t path_length) { if (path_length > sizeof(data_.local.sun_path) - 1) { // The buffer is not large enough to store this address. asio::error_code ec(asio::error::name_too_long); asio::detail::throw_error(ec); } using namespace std; // For memcpy. data_.local = asio::detail::sockaddr_un_type(); data_.local.sun_family = AF_UNIX; if (path_length > 0) memcpy(data_.local.sun_path, path_name, path_length); path_length_ = path_length; // NUL-terminate normal path names. Names that start with a NUL are in the // UNIX domain protocol's "abstract namespace" and are not NUL-terminated. 
if (path_length > 0 && data_.local.sun_path[0] == 0) data_.local.sun_path[path_length] = 0; } } // namespace detail } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) #endif // ASIO_LOCAL_DETAIL_IMPL_ENDPOINT_IPP galera-4-26.4.25/asio/asio/local/detail/endpoint.hpp000644 000164 177776 00000006764 15107057155 023304 0ustar00jenkinsnogroup000000 000000 // // local/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DETAIL_ENDPOINT_HPP #define ASIO_LOCAL_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) #include #include #include "asio/detail/socket_types.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { namespace detail { // Helper class for implementing a UNIX domain endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint(); // Construct an endpoint using the specified path name. ASIO_DECL endpoint(const char* path_name); // Construct an endpoint using the specified path name. ASIO_DECL endpoint(const std::string& path_name); #if defined(ASIO_HAS_STRING_VIEW) // Construct an endpoint using the specified path name. ASIO_DECL endpoint(string_view path_name); #endif // defined(ASIO_HAS_STRING_VIEW) // Copy constructor. endpoint(const endpoint& other) : data_(other.data_), path_length_(other.path_length_) { } // Assign from another endpoint. 
endpoint& operator=(const endpoint& other) { data_ = other.data_; path_length_ = other.path_length_; return *this; } // Get the underlying endpoint in the native type. asio::detail::socket_addr_type* data() { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const { return path_length_ + offsetof(asio::detail::sockaddr_un_type, sun_path); } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(asio::detail::sockaddr_un_type); } // Get the path associated with the endpoint. ASIO_DECL std::string path() const; // Set the path associated with the endpoint. ASIO_DECL void path(const char* p); // Set the path associated with the endpoint. ASIO_DECL void path(const std::string& p); // Compare two endpoints for equality. ASIO_DECL friend bool operator==( const endpoint& e1, const endpoint& e2); // Compare endpoints for ordering. ASIO_DECL friend bool operator<( const endpoint& e1, const endpoint& e2); private: // The underlying UNIX socket address. union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_un_type local; } data_; // The length of the path associated with the endpoint. std::size_t path_length_; // Initialise with a specified path. 
ASIO_DECL void init(const char* path, std::size_t path_length); }; } // namespace detail } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/local/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_LOCAL_SOCKETS) #endif // ASIO_LOCAL_DETAIL_ENDPOINT_HPP galera-4-26.4.25/asio/asio/local/basic_endpoint.hpp000644 000164 177776 00000013210 15107057155 023163 0ustar00jenkinsnogroup000000 000000 // // local/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Derived from a public domain implementation written by Daniel Casimiro. // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_BASIC_ENDPOINT_HPP #define ASIO_LOCAL_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/local/detail/endpoint.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Describes an endpoint for a UNIX socket. /** * The asio::local::basic_endpoint class template describes an endpoint * that may be associated with a particular UNIX socket. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef Protocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. 
#if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. basic_endpoint() { } /// Construct an endpoint using the specified path name. basic_endpoint(const char* path_name) : impl_(path_name) { } /// Construct an endpoint using the specified path name. basic_endpoint(const std::string& path_name) : impl_(path_name) { } #if defined(ASIO_HAS_STRING_VIEW) /// Construct an endpoint using the specified path name. basic_endpoint(string_view path_name) : impl_(path_name) { } #endif // defined(ASIO_HAS_STRING_VIEW) /// Copy constructor. basic_endpoint(const basic_endpoint& other) : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_endpoint(basic_endpoint&& other) : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another endpoint. basic_endpoint& operator=(const basic_endpoint& other) { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another endpoint. basic_endpoint& operator=(basic_endpoint&& other) { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// The protocol associated with the endpoint. protocol_type protocol() const { return protocol_type(); } /// Get the underlying endpoint in the native type. data_type* data() { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return impl_.capacity(); } /// Get the path associated with the endpoint. std::string path() const { return impl_.path(); } /// Set the path associated with the endpoint. 
void path(const char* p) { impl_.path(p); } /// Set the path associated with the endpoint. void path(const std::string& p) { impl_.path(p); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1.impl_ == e2.impl_); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 < e2); } private: // The underlying UNIX domain endpoint. asio::local::detail::endpoint impl_; }; /// Output an endpoint as a string. /** * Used to output a human-readable string for a specified endpoint. * * @param os The output stream to which the string will be written. * * @param endpoint The endpoint to be written. * * @return The output stream. * * @relates asio::local::basic_endpoint */ template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint) { os << endpoint.path(); return os; } } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_BASIC_ENDPOINT_HPP galera-4-26.4.25/asio/asio/local/stream_protocol.hpp000644 000164 177776 00000004227 15107057155 023426 0ustar00jenkinsnogroup000000 000000 // // local/stream_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_STREAM_PROTOCOL_HPP #define ASIO_LOCAL_STREAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Encapsulates the flags needed for stream-oriented UNIX sockets. /** * The asio::local::stream_protocol class contains flags necessary for * stream-oriented UNIX domain sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class stream_protocol { public: /// Obtain an identifier for the type of the protocol. int type() const { return SOCK_STREAM; } /// Obtain an identifier for the protocol. int protocol() const { return 0; } /// Obtain an identifier for the protocol family. int family() const { return AF_UNIX; } /// The type of a UNIX domain endpoint. typedef basic_endpoint endpoint; /// The UNIX domain socket type. typedef basic_stream_socket socket; /// The UNIX domain acceptor type. typedef basic_socket_acceptor acceptor; #if !defined(ASIO_NO_IOSTREAM) /// The UNIX domain iostream type. 
typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) }; } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_STREAM_PROTOCOL_HPP galera-4-26.4.25/asio/asio/local/datagram_protocol.hpp000644 000164 177776 00000003526 15107057155 023714 0ustar00jenkinsnogroup000000 000000 // // local/datagram_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP #define ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Encapsulates the flags needed for datagram-oriented UNIX sockets. /** * The asio::local::datagram_protocol class contains flags necessary for * datagram-oriented UNIX domain sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class datagram_protocol { public: /// Obtain an identifier for the type of the protocol. int type() const { return SOCK_DGRAM; } /// Obtain an identifier for the protocol. int protocol() const { return 0; } /// Obtain an identifier for the protocol family. int family() const { return AF_UNIX; } /// The type of a UNIX domain endpoint. typedef basic_endpoint endpoint; /// The UNIX domain socket type. 
typedef basic_datagram_socket socket; }; } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_DATAGRAM_PROTOCOL_HPP galera-4-26.4.25/asio/asio/local/connect_pair.hpp000644 000164 177776 00000006070 15107057155 022654 0ustar00jenkinsnogroup000000 000000 // // local/connect_pair.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_LOCAL_CONNECT_PAIR_HPP #define ASIO_LOCAL_CONNECT_PAIR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_LOCAL_SOCKETS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_socket.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace local { /// Create a pair of connected sockets. template void connect_pair(basic_socket& socket1, basic_socket& socket2); /// Create a pair of connected sockets. template ASIO_SYNC_OP_VOID connect_pair(basic_socket& socket1, basic_socket& socket2, asio::error_code& ec); template inline void connect_pair(basic_socket& socket1, basic_socket& socket2) { asio::error_code ec; connect_pair(socket1, socket2, ec); asio::detail::throw_error(ec, "connect_pair"); } template inline ASIO_SYNC_OP_VOID connect_pair( basic_socket& socket1, basic_socket& socket2, asio::error_code& ec) { // Check that this function is only being used with a UNIX domain socket. 
asio::local::basic_endpoint* tmp = static_cast(0); (void)tmp; Protocol protocol; asio::detail::socket_type sv[2]; if (asio::detail::socket_ops::socketpair(protocol.family(), protocol.type(), protocol.protocol(), sv, ec) == asio::detail::socket_error_retval) ASIO_SYNC_OP_VOID_RETURN(ec); socket1.assign(protocol, sv[0], ec); if (ec) { asio::error_code temp_ec; asio::detail::socket_ops::state_type state[2] = { 0, 0 }; asio::detail::socket_ops::close(sv[0], state[0], true, temp_ec); asio::detail::socket_ops::close(sv[1], state[1], true, temp_ec); ASIO_SYNC_OP_VOID_RETURN(ec); } socket2.assign(protocol, sv[1], ec); if (ec) { asio::error_code temp_ec; socket1.close(temp_ec); asio::detail::socket_ops::state_type state = 0; asio::detail::socket_ops::close(sv[1], state, true, temp_ec); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID_RETURN(ec); } } // namespace local } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_LOCAL_SOCKETS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_LOCAL_CONNECT_PAIR_HPP galera-4-26.4.25/asio/asio/io_service.hpp000644 000164 177776 00000001406 15107057155 021243 0ustar00jenkinsnogroup000000 000000 // // io_service.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_SERVICE_HPP #define ASIO_IO_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_NO_DEPRECATED) /// Typedef for backwards compatibility. 
typedef io_context io_service; #endif // !defined(ASIO_NO_DEPRECATED) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IO_SERVICE_HPP galera-4-26.4.25/asio/asio/basic_raw_socket.hpp000644 000164 177776 00000124223 15107057155 022421 0ustar00jenkinsnogroup000000 000000 // // basic_raw_socket.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_RAW_SOCKET_HPP #define ASIO_BASIC_RAW_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_RAW_SOCKET_FWD_DECL) #define ASIO_BASIC_RAW_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. template class basic_raw_socket; #endif // !defined(ASIO_BASIC_RAW_SOCKET_FWD_DECL) /// Provides raw-oriented socket functionality. /** * The basic_raw_socket class template provides asynchronous and blocking * raw-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_raw_socket : public basic_socket { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Rebinds the socket type to another executor. template struct rebind_executor { /// The socket type when rebound to the specified executor. typedef basic_raw_socket other; }; /// The native representation of a socket. 
#if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename basic_socket::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_raw_socket without opening it. /** * This constructor creates a raw socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_raw_socket(const executor_type& ex) : basic_socket(ex) { } /// Construct a basic_raw_socket without opening it. /** * This constructor creates a raw socket without opening it. The open() * function must be called before data can be sent or received on the socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. */ template explicit basic_raw_socket(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context) { } /// Construct and open a basic_raw_socket. /** * This constructor creates and opens a raw socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(const executor_type& ex, const protocol_type& protocol) : basic_socket(ex, protocol) { } /// Construct and open a basic_raw_socket. /** * This constructor creates and opens a raw socket. 
* * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ template basic_raw_socket(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol) { } /// Construct a basic_raw_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a raw socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the raw * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(const executor_type& ex, const endpoint_type& endpoint) : basic_socket(ex, endpoint) { } /// Construct a basic_raw_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a raw socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the raw * socket will be bound. * * @throws asio::system_error Thrown on failure. 
*/ template basic_raw_socket(ExecutionContext& context, const endpoint_type& endpoint, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, endpoint) { } /// Construct a basic_raw_socket on an existing native socket. /** * This constructor creates a raw socket object to hold an existing * native socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_raw_socket(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket(ex, protocol, native_socket) { } /// Construct a basic_raw_socket on an existing native socket. /** * This constructor creates a raw socket object to hold an existing * native socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ template basic_raw_socket(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_socket, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_raw_socket from another. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(const executor_type&) * constructor. */ basic_raw_socket(basic_raw_socket&& other) : basic_socket(std::move(other)) { } /// Move-assign a basic_raw_socket from another. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(const executor_type&) * constructor. */ basic_raw_socket& operator=(basic_raw_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } /// Move-construct a basic_raw_socket from a socket of another protocol /// type. /** * This constructor moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(const executor_type&) * constructor. */ template basic_raw_socket(basic_raw_socket&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : basic_socket(std::move(other)) { } /// Move-assign a basic_raw_socket from a socket of another protocol type. /** * This assignment operator moves a raw socket from one object to another. * * @param other The other basic_raw_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_raw_socket(const executor_type&) * constructor. */ template typename enable_if< is_convertible::value && is_convertible::value, basic_raw_socket& >::type operator=(basic_raw_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the socket. 
/** * This function destroys the socket, cancelling any outstanding asynchronous * operations associated with the socket as if by calling @c cancel. */ ~basic_raw_socket() { } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code socket.send(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One ore more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. 
*/ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. * * @note The send operation can only be used with a connected socket. Use * the send_to function to send data on an unconnected raw socket. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous send on a connected socket. /** * This function is used to send data on the raw socket. The function call * will block until the data has been sent successfully or an error occurs. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_send operation can only be used with a connected socket. * Use the async_send_to function to send data on an unconnected raw * socket. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, flags); } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.send_to(asio::buffer(data, size), destination); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination) { asio::error_code ec; std::size_t s = this->impl_.get_service().send_to( this->impl_.get_implementation(), buffers, destination, 0, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. 
/** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send_to( this->impl_.get_implementation(), buffers, destination, flags, ec); asio::detail::throw_error(ec, "send_to"); return s; } /// Send raw data to the specified endpoint. /** * This function is used to send raw data to the specified remote endpoint. * The function call will block until the data has been sent successfully or * an error occurs. * * @param buffers One or more data buffers to be sent to the remote endpoint. * * @param destination The remote endpoint to which the data will be sent. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. */ template std::size_t send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send_to(this->impl_.get_implementation(), buffers, destination, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * asio::ip::udp::endpoint destination( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_send_to( * asio::buffer(data, size), destination, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send_to(), handler, this, buffers, destination, socket_base::message_flags(0)); } /// Start an asynchronous send. /** * This function is used to asynchronously send raw data to the specified * remote endpoint. The function call always returns immediately. * * @param buffers One or more data buffers to be sent to the remote endpoint. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param destination The remote endpoint to which the data will be sent. * Copies will be made of the endpoint as required. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send_to(const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send_to(), handler, this, buffers, destination, flags); } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.receive(asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @note The receive operation can only be used with a connected socket. Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the raw socket. The function * call will block until data has been received successfully or an error * occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. * * @note The receive operation can only be used with a connected socket. 
Use * the receive_from function to receive data on an unconnected raw * socket. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous receive on a connected socket. /** * This function is used to asynchronously receive data from the raw * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The async_receive operation can only be used with a connected socket. * Use the async_receive_from function to receive data on an unconnected * raw socket. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, flags); } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. 
The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * asio::ip::udp::endpoint sender_endpoint; * socket.receive_from( * asio::buffer(data, size), sender_endpoint); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, 0, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. 
*/ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec); asio::detail::throw_error(ec, "receive_from"); return s; } /// Receive raw data with the endpoint of the sender. /** * This function is used to receive raw data. The function call will block * until data has been received successfully or an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. */ template std::size_t receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().receive_from( this->impl_.get_implementation(), buffers, sender_endpoint, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. 
The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code socket.async_receive_from( * asio::buffer(data, size), 0, sender_endpoint, handler); @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_from(), handler, this, buffers, &sender_endpoint, socket_base::message_flags(0)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive raw data. The function * call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param sender_endpoint An endpoint object that receives the endpoint of * the remote sender of the data. Ownership of the sender_endpoint object * is retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. 
Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive_from(const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_from(), handler, this, buffers, &sender_endpoint, flags); } private: struct initiate_async_send { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_raw_socket* self, const ConstBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_send_to { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_raw_socket* self, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send_to( self->impl_.get_implementation(), buffers, destination, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_raw_socket* self, const MutableBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive_from { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_raw_socket* self, const MutableBufferSequence& buffers, endpoint_type* sender_endpoint, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive_from( self->impl_.get_implementation(), buffers, *sender_endpoint, flags, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_RAW_SOCKET_HPP galera-4-26.4.25/asio/asio/completion_condition.hpp000644 000164 177776 00000012327 15107057155 023337 0ustar00jenkinsnogroup000000 000000 // // completion_condition.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COMPLETION_CONDITION_HPP #define ASIO_COMPLETION_CONDITION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // The default maximum number of bytes to transfer in a single operation. enum default_max_transfer_size_t { default_max_transfer_size = 65536 }; // Adapt result of old-style completion conditions (which had a bool result // where true indicated that the operation was complete). inline std::size_t adapt_completion_condition_result(bool result) { return result ? 0 : default_max_transfer_size; } // Adapt result of current completion conditions (which have a size_t result // where 0 means the operation is complete, and otherwise the result is the // maximum number of bytes to transfer on the next underlying operation). inline std::size_t adapt_completion_condition_result(std::size_t result) { return result; } class transfer_all_t { public: typedef std::size_t result_type; template std::size_t operator()(const Error& err, std::size_t) { return !!err ? 0 : default_max_transfer_size; } }; class transfer_at_least_t { public: typedef std::size_t result_type; explicit transfer_at_least_t(std::size_t minimum) : minimum_(minimum) { } template std::size_t operator()(const Error& err, std::size_t bytes_transferred) { return (!!err || bytes_transferred >= minimum_) ? 0 : default_max_transfer_size; } private: std::size_t minimum_; }; class transfer_exactly_t { public: typedef std::size_t result_type; explicit transfer_exactly_t(std::size_t size) : size_(size) { } template std::size_t operator()(const Error& err, std::size_t bytes_transferred) { return (!!err || bytes_transferred >= size_) ? 0 : (size_ - bytes_transferred < default_max_transfer_size ? 
size_ - bytes_transferred : std::size_t(default_max_transfer_size)); } private: std::size_t size_; }; } // namespace detail /** * @defgroup completion_condition Completion Condition Function Objects * * Function objects used for determining when a read or write operation should * complete. */ /*@{*/ /// Return a completion condition function object that indicates that a read or /// write operation should continue until all of the data has been transferred, /// or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_all(), ec); * if (ec) * { * // An error occurred. * } * else * { * // n == 128 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_all(); #else inline detail::transfer_all_t transfer_all() { return detail::transfer_all_t(); } #endif /// Return a completion condition function object that indicates that a read or /// write operation should continue until a minimum number of bytes has been /// transferred, or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full or contains at least 64 bytes: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_at_least(64), ec); * if (ec) * { * // An error occurred. 
* } * else * { * // n >= 64 && n <= 128 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_at_least(std::size_t minimum); #else inline detail::transfer_at_least_t transfer_at_least(std::size_t minimum) { return detail::transfer_at_least_t(minimum); } #endif /// Return a completion condition function object that indicates that a read or /// write operation should continue until an exact number of bytes has been /// transferred, or until an error occurs. /** * This function is used to create an object, of unspecified type, that meets * CompletionCondition requirements. * * @par Example * Reading until a buffer is full or contains exactly 64 bytes: * @code * boost::array buf; * asio::error_code ec; * std::size_t n = asio::read( * sock, asio::buffer(buf), * asio::transfer_exactly(64), ec); * if (ec) * { * // An error occurred. * } * else * { * // n == 64 * } * @endcode */ #if defined(GENERATING_DOCUMENTATION) unspecified transfer_exactly(std::size_t size); #else inline detail::transfer_exactly_t transfer_exactly(std::size_t size) { return detail::transfer_exactly_t(size); } #endif /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_COMPLETION_CONDITION_HPP galera-4-26.4.25/asio/asio/thread.hpp000644 000164 177776 00000004405 15107057155 020365 0ustar00jenkinsnogroup000000 000000 // // thread.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_THREAD_HPP #define ASIO_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A simple abstraction for starting threads. 
/** * The asio::thread class implements the smallest possible subset of the * functionality of boost::thread. It is intended to be used only for starting * a thread and waiting for it to exit. If more extensive threading * capabilities are required, you are strongly advised to use something else. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * A typical use of asio::thread would be to launch a thread to run an * io_context's event processing loop: * * @par * @code asio::io_context io_context; * // ... * asio::thread t(boost::bind(&asio::io_context::run, &io_context)); * // ... * t.join(); @endcode */ class thread : private noncopyable { public: /// Start a new thread that executes the supplied function. /** * This constructor creates a new thread that will execute the given function * or function object. * * @param f The function or function object to be run in the thread. The * function signature must be: @code void f(); @endcode */ template explicit thread(Function f) : impl_(f) { } /// Destructor. ~thread() { } /// Wait for the thread to exit. /** * This function will block until the thread has exited. * * If this function is not called before the thread object is destroyed, the * thread itself will continue to run until completion. You will, however, * no longer have the ability to wait for it to exit. */ void join() { impl_.join(); } private: detail::thread impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_THREAD_HPP galera-4-26.4.25/asio/asio/wait_traits.hpp000644 000164 177776 00000002546 15107057155 021454 0ustar00jenkinsnogroup000000 000000 // // wait_traits.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WAIT_TRAITS_HPP #define ASIO_WAIT_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { /// Wait traits suitable for use with the basic_waitable_timer class template. template struct wait_traits { /// Convert a clock duration into a duration used for waiting. /** * @returns @c d. */ static typename Clock::duration to_wait_duration( const typename Clock::duration& d) { return d; } /// Convert a clock duration into a duration used for waiting. /** * @returns @c d. */ static typename Clock::duration to_wait_duration( const typename Clock::time_point& t) { typename Clock::time_point now = Clock::now(); if (now + (Clock::duration::max)() < t) return (Clock::duration::max)(); if (now + (Clock::duration::min)() > t) return (Clock::duration::min)(); return t - now; } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_WAIT_TRAITS_HPP galera-4-26.4.25/asio/asio/buffered_stream.hpp000644 000164 177776 00000017336 15107057155 022262 0ustar00jenkinsnogroup000000 000000 // // buffered_stream.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_HPP #define ASIO_BUFFERED_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffered_read_stream.hpp" #include "asio/buffered_write_stream.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the read- and write-related operations of a stream. /** * The buffered_stream class template can be used to add buffering to the * synchronous and asynchronous read and write operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the executor associated with the object. typedef typename lowest_layer_type::executor_type executor_type; /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a) : inner_stream_impl_(a), stream_impl_(inner_stream_impl_) { } /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_stream(Arg& a, std::size_t read_buffer_size, std::size_t write_buffer_size) : inner_stream_impl_(a, write_buffer_size), stream_impl_(inner_stream_impl_, read_buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return stream_impl_.next_layer().next_layer(); } /// Get a reference to the lowest layer. 
lowest_layer_type& lowest_layer() { return stream_impl_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return stream_impl_.lowest_layer(); } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return stream_impl_.lowest_layer().get_executor(); } /// Close the stream. void close() { stream_impl_.close(); } /// Close the stream. ASIO_SYNC_OP_VOID close(asio::error_code& ec) { stream_impl_.close(ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation. Throws an /// exception on failure. std::size_t flush() { return stream_impl_.next_layer().flush(); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation, or 0 if an /// error occurred. std::size_t flush(asio::error_code& ec) { return stream_impl_.next_layer().flush(ec); } /// Start an asynchronous flush. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_flush(ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.next_layer().async_flush( ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers) { return stream_impl_.write_some(buffers); } /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.write_some(buffers, ec); } /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. 
template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return stream_impl_.async_write_some(buffers, ASIO_MOVE_CAST(WriteHandler)(handler)); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation. Throws an exception on failure. std::size_t fill() { return stream_impl_.fill(); } /// Fill the buffer with some data. Returns the number of bytes placed in the /// buffer as a result of the operation, or 0 if an error occurred. std::size_t fill(asio::error_code& ec) { return stream_impl_.fill(ec); } /// Start an asynchronous fill. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_fill(ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_fill(ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers) { return stream_impl_.read_some(buffers); } /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.read_some(buffers, ec); } /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return stream_impl_.async_read_some(buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. 
template std::size_t peek(const MutableBufferSequence& buffers) { return stream_impl_.peek(buffers); } /// Peek at the incoming data on the stream. Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return stream_impl_.peek(buffers, ec); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return stream_impl_.in_avail(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { return stream_impl_.in_avail(ec); } private: // The buffered write stream. typedef buffered_write_stream write_stream_type; write_stream_type inner_stream_impl_; // The buffered read stream. typedef buffered_read_stream read_stream_type; read_stream_type stream_impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFERED_STREAM_HPP galera-4-26.4.25/asio/asio/signal_set.hpp000644 000164 177776 00000001247 15107057155 021247 0ustar00jenkinsnogroup000000 000000 // // signal_set.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SIGNAL_SET_HPP #define ASIO_SIGNAL_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_signal_set.hpp" namespace asio { /// Typedef for the typical usage of a signal set. typedef basic_signal_set<> signal_set; } // namespace asio #endif // ASIO_SIGNAL_SET_HPP galera-4-26.4.25/asio/asio/basic_streambuf_fwd.hpp000644 000164 177776 00000001515 15107057155 023106 0ustar00jenkinsnogroup000000 000000 // // basic_streambuf_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_STREAMBUF_FWD_HPP #define ASIO_BASIC_STREAMBUF_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include namespace asio { template > class basic_streambuf; template > class basic_streambuf_ref; } // namespace asio #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_BASIC_STREAMBUF_FWD_HPP galera-4-26.4.25/asio/asio/basic_seq_packet_socket.hpp000644 000164 177776 00000065230 15107057155 023751 0ustar00jenkinsnogroup000000 000000 // // basic_seq_packet_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #define ASIO_BASIC_SEQ_PACKET_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL) #define ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. template class basic_seq_packet_socket; #endif // !defined(ASIO_BASIC_SEQ_PACKET_SOCKET_FWD_DECL) /// Provides sequenced packet socket functionality. /** * The basic_seq_packet_socket class template provides asynchronous and blocking * sequenced packet socket functionality. 
* * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_seq_packet_socket : public basic_socket { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Rebinds the socket type to another executor. template struct rebind_executor { /// The socket type when rebound to the specified executor. typedef basic_seq_packet_socket other; }; /// The native representation of a socket. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename basic_socket::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_seq_packet_socket without opening it. /** * This constructor creates a sequenced packet socket without opening it. The * socket needs to be opened and then connected or accepted before data can * be sent or received on it. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_seq_packet_socket(const executor_type& ex) : basic_socket(ex) { } /// Construct a basic_seq_packet_socket without opening it. /** * This constructor creates a sequenced packet socket without opening it. The * socket needs to be opened and then connected or accepted before data can * be sent or received on it. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. */ template explicit basic_seq_packet_socket(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context) { } /// Construct and open a basic_seq_packet_socket. /** * This constructor creates and opens a sequenced_packet socket. 
The socket * needs to be connected or accepted before data can be sent or received on * it. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(const executor_type& ex, const protocol_type& protocol) : basic_socket(ex, protocol) { } /// Construct and open a basic_seq_packet_socket. /** * This constructor creates and opens a sequenced_packet socket. The socket * needs to be connected or accepted before data can be sent or received on * it. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ template basic_seq_packet_socket(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol) { } /// Construct a basic_seq_packet_socket, opening it and binding it to the /// given local endpoint. /** * This constructor creates a sequenced packet socket and automatically opens * it bound to the specified endpoint on the local machine. The protocol used * is the protocol associated with the given endpoint. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the sequenced * packet socket will be bound. * * @throws asio::system_error Thrown on failure. 
*/ basic_seq_packet_socket(const executor_type& ex, const endpoint_type& endpoint) : basic_socket(ex, endpoint) { } /// Construct a basic_seq_packet_socket, opening it and binding it to the /// given local endpoint. /** * This constructor creates a sequenced packet socket and automatically opens * it bound to the specified endpoint on the local machine. The protocol used * is the protocol associated with the given endpoint. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the sequenced * packet socket will be bound. * * @throws asio::system_error Thrown on failure. */ template basic_seq_packet_socket(ExecutionContext& context, const endpoint_type& endpoint, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, endpoint) { } /// Construct a basic_seq_packet_socket on an existing native socket. /** * This constructor creates a sequenced packet socket object to hold an * existing native socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_seq_packet_socket(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket(ex, protocol, native_socket) { } /// Construct a basic_seq_packet_socket on an existing native socket. /** * This constructor creates a sequenced packet socket object to hold an * existing native socket. 
* * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ template basic_seq_packet_socket(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_socket, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_seq_packet_socket from another. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(const executor_type&) * constructor. */ basic_seq_packet_socket(basic_seq_packet_socket&& other) : basic_socket(std::move(other)) { } /// Move-assign a basic_seq_packet_socket from another. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(const executor_type&) * constructor. */ basic_seq_packet_socket& operator=(basic_seq_packet_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } /// Move-construct a basic_seq_packet_socket from a socket of another protocol /// type. /** * This constructor moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(const executor_type&) * constructor. */ template basic_seq_packet_socket(basic_seq_packet_socket&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : basic_socket(std::move(other)) { } /// Move-assign a basic_seq_packet_socket from a socket of another protocol /// type. /** * This assignment operator moves a sequenced packet socket from one object to * another. * * @param other The other basic_seq_packet_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_seq_packet_socket(const executor_type&) * constructor. */ template typename enable_if< is_convertible::value && is_convertible::value, basic_seq_packet_socket& >::type operator=(basic_seq_packet_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the socket. /** * This function destroys the socket, cancelling any outstanding asynchronous * operations associated with the socket as if by calling @c cancel. */ ~basic_seq_packet_socket() { } /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block until the data has been sent successfully, or an * until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. 
* * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the sequenced packet socket. The * function call will block the data has been sent successfully, or an until * error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. Returns 0 if an error occurred. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the sequenced packet * socket. The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. 
* * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, flags); } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_with_flags( this->impl_.get_implementation(), buffers, 0, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on the socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), 0, out_flags); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive_with_flags( this->impl_.get_implementation(), buffers, in_flags, out_flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the sequenced packet socket. The * function call will block until data has been received successfully, or * until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags After the receive call completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. Returns 0 if an error occurred. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { return this->impl_.get_service().receive_with_flags( this->impl_.get_implementation(), buffers, in_flags, out_flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the sequenced * packet socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_with_flags(), handler, this, buffers, socket_base::message_flags(0), &out_flags); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the sequenced * data socket. 
The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param in_flags Flags specifying how the receive call is to be made. * * @param out_flags Once the asynchronous operation completes, contains flags * associated with the received data. For example, if the * socket_base::message_end_of_record bit is set then the received data marks * the end of a record. The caller must guarantee that the referenced * variable remains valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive( * asio::buffer(data, size), * 0, out_flags, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive_with_flags(), handler, this, buffers, in_flags, &out_flags); } private: struct initiate_async_send { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_seq_packet_socket* self, const ConstBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive_with_flags { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_seq_packet_socket* self, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags* out_flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive_with_flags( self->impl_.get_implementation(), buffers, in_flags, *out_flags, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SEQ_PACKET_SOCKET_HPP galera-4-26.4.25/asio/asio/yield.hpp000644 000164 177776 00000000720 15107057155 020220 0ustar00jenkinsnogroup000000 000000 // // yield.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #include "coroutine.hpp" #ifndef reenter # define reenter(c) ASIO_CORO_REENTER(c) #endif #ifndef yield # define yield ASIO_CORO_YIELD #endif #ifndef fork # define fork ASIO_CORO_FORK #endif galera-4-26.4.25/asio/asio/buffers_iterator.hpp000644 000164 177776 00000033052 15107057155 022463 0ustar00jenkinsnogroup000000 000000 // // buffers_iterator.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERS_ITERATOR_HPP #define ASIO_BUFFERS_ITERATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct buffers_iterator_types_helper; template <> struct buffers_iterator_types_helper { typedef const_buffer buffer_type; template struct byte_type { typedef typename add_const::type type; }; }; template <> struct buffers_iterator_types_helper { typedef mutable_buffer buffer_type; template struct byte_type { typedef ByteType type; }; }; template struct buffers_iterator_types { enum { is_mutable = is_convertible< typename BufferSequence::value_type, mutable_buffer>::value }; typedef buffers_iterator_types_helper helper; typedef typename helper::buffer_type buffer_type; typedef typename helper::template byte_type::type byte_type; typedef typename BufferSequence::const_iterator const_iterator; }; template struct buffers_iterator_types { typedef mutable_buffer 
buffer_type; typedef ByteType byte_type; typedef const mutable_buffer* const_iterator; }; template struct buffers_iterator_types { typedef const_buffer buffer_type; typedef typename add_const::type byte_type; typedef const const_buffer* const_iterator; }; #if !defined(ASIO_NO_DEPRECATED) template struct buffers_iterator_types { typedef mutable_buffer buffer_type; typedef ByteType byte_type; typedef const mutable_buffer* const_iterator; }; template struct buffers_iterator_types { typedef const_buffer buffer_type; typedef typename add_const::type byte_type; typedef const const_buffer* const_iterator; }; #endif // !defined(ASIO_NO_DEPRECATED) } /// A random access iterator over the bytes in a buffer sequence. template class buffers_iterator { private: typedef typename detail::buffers_iterator_types< BufferSequence, ByteType>::buffer_type buffer_type; typedef typename detail::buffers_iterator_types::const_iterator buffer_sequence_iterator_type; public: /// The type used for the distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of the value pointed to by the iterator. typedef ByteType value_type; #if defined(GENERATING_DOCUMENTATION) /// The type of the result of applying operator->() to the iterator. /** * If the buffer sequence stores buffer objects that are convertible to * mutable_buffer, this is a pointer to a non-const ByteType. Otherwise, a * pointer to a const ByteType. */ typedef const_or_non_const_ByteType* pointer; #else // defined(GENERATING_DOCUMENTATION) typedef typename detail::buffers_iterator_types< BufferSequence, ByteType>::byte_type* pointer; #endif // defined(GENERATING_DOCUMENTATION) #if defined(GENERATING_DOCUMENTATION) /// The type of the result of applying operator*() to the iterator. /** * If the buffer sequence stores buffer objects that are convertible to * mutable_buffer, this is a reference to a non-const ByteType. Otherwise, a * reference to a const ByteType. 
*/ typedef const_or_non_const_ByteType& reference; #else // defined(GENERATING_DOCUMENTATION) typedef typename detail::buffers_iterator_types< BufferSequence, ByteType>::byte_type& reference; #endif // defined(GENERATING_DOCUMENTATION) /// The iterator category. typedef std::random_access_iterator_tag iterator_category; /// Default constructor. Creates an iterator in an undefined state. buffers_iterator() : current_buffer_(), current_buffer_position_(0), begin_(), current_(), end_(), position_(0) { } /// Construct an iterator representing the beginning of the buffers' data. static buffers_iterator begin(const BufferSequence& buffers) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3) __attribute__ ((__noinline__)) #endif // defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3) { buffers_iterator new_iter; new_iter.begin_ = asio::buffer_sequence_begin(buffers); new_iter.current_ = asio::buffer_sequence_begin(buffers); new_iter.end_ = asio::buffer_sequence_end(buffers); while (new_iter.current_ != new_iter.end_) { new_iter.current_buffer_ = *new_iter.current_; if (new_iter.current_buffer_.size() > 0) break; ++new_iter.current_; } return new_iter; } /// Construct an iterator representing the end of the buffers' data. static buffers_iterator end(const BufferSequence& buffers) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3) __attribute__ ((__noinline__)) #endif // defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 3) { buffers_iterator new_iter; new_iter.begin_ = asio::buffer_sequence_begin(buffers); new_iter.current_ = asio::buffer_sequence_begin(buffers); new_iter.end_ = asio::buffer_sequence_end(buffers); while (new_iter.current_ != new_iter.end_) { buffer_type buffer = *new_iter.current_; new_iter.position_ += buffer.size(); ++new_iter.current_; } return new_iter; } /// Dereference an iterator. reference operator*() const { return dereference(); } /// Dereference an iterator. 
pointer operator->() const { return &dereference(); } /// Access an individual element. reference operator[](std::ptrdiff_t difference) const { buffers_iterator tmp(*this); tmp.advance(difference); return *tmp; } /// Increment operator (prefix). buffers_iterator& operator++() { increment(); return *this; } /// Increment operator (postfix). buffers_iterator operator++(int) { buffers_iterator tmp(*this); ++*this; return tmp; } /// Decrement operator (prefix). buffers_iterator& operator--() { decrement(); return *this; } /// Decrement operator (postfix). buffers_iterator operator--(int) { buffers_iterator tmp(*this); --*this; return tmp; } /// Addition operator. buffers_iterator& operator+=(std::ptrdiff_t difference) { advance(difference); return *this; } /// Subtraction operator. buffers_iterator& operator-=(std::ptrdiff_t difference) { advance(-difference); return *this; } /// Addition operator. friend buffers_iterator operator+(const buffers_iterator& iter, std::ptrdiff_t difference) { buffers_iterator tmp(iter); tmp.advance(difference); return tmp; } /// Addition operator. friend buffers_iterator operator+(std::ptrdiff_t difference, const buffers_iterator& iter) { buffers_iterator tmp(iter); tmp.advance(difference); return tmp; } /// Subtraction operator. friend buffers_iterator operator-(const buffers_iterator& iter, std::ptrdiff_t difference) { buffers_iterator tmp(iter); tmp.advance(-difference); return tmp; } /// Subtraction operator. friend std::ptrdiff_t operator-(const buffers_iterator& a, const buffers_iterator& b) { return b.distance_to(a); } /// Test two iterators for equality. friend bool operator==(const buffers_iterator& a, const buffers_iterator& b) { return a.equal(b); } /// Test two iterators for inequality. friend bool operator!=(const buffers_iterator& a, const buffers_iterator& b) { return !a.equal(b); } /// Compare two iterators. 
friend bool operator<(const buffers_iterator& a, const buffers_iterator& b) { return a.distance_to(b) > 0; } /// Compare two iterators. friend bool operator<=(const buffers_iterator& a, const buffers_iterator& b) { return !(b < a); } /// Compare two iterators. friend bool operator>(const buffers_iterator& a, const buffers_iterator& b) { return b < a; } /// Compare two iterators. friend bool operator>=(const buffers_iterator& a, const buffers_iterator& b) { return !(a < b); } private: // Dereference the iterator. reference dereference() const { return static_cast( current_buffer_.data())[current_buffer_position_]; } // Compare two iterators for equality. bool equal(const buffers_iterator& other) const { return position_ == other.position_; } // Increment the iterator. void increment() { ASIO_ASSERT(current_ != end_ && "iterator out of bounds"); ++position_; // Check if the increment can be satisfied by the current buffer. ++current_buffer_position_; if (current_buffer_position_ != current_buffer_.size()) return; // Find the next non-empty buffer. ++current_; current_buffer_position_ = 0; while (current_ != end_) { current_buffer_ = *current_; if (current_buffer_.size() > 0) return; ++current_; } } // Decrement the iterator. void decrement() { ASIO_ASSERT(position_ > 0 && "iterator out of bounds"); --position_; // Check if the decrement can be satisfied by the current buffer. if (current_buffer_position_ != 0) { --current_buffer_position_; return; } // Find the previous non-empty buffer. buffer_sequence_iterator_type iter = current_; while (iter != begin_) { --iter; buffer_type buffer = *iter; std::size_t buffer_size = buffer.size(); if (buffer_size > 0) { current_ = iter; current_buffer_ = buffer; current_buffer_position_ = buffer_size - 1; return; } } } // Advance the iterator by the specified distance. 
void advance(std::ptrdiff_t n) { if (n > 0) { ASIO_ASSERT(current_ != end_ && "iterator out of bounds"); for (;;) { std::ptrdiff_t current_buffer_balance = current_buffer_.size() - current_buffer_position_; // Check if the advance can be satisfied by the current buffer. if (current_buffer_balance > n) { position_ += n; current_buffer_position_ += n; return; } // Update position. n -= current_buffer_balance; position_ += current_buffer_balance; // Move to next buffer. If it is empty then it will be skipped on the // next iteration of this loop. if (++current_ == end_) { ASIO_ASSERT(n == 0 && "iterator out of bounds"); current_buffer_ = buffer_type(); current_buffer_position_ = 0; return; } current_buffer_ = *current_; current_buffer_position_ = 0; } } else if (n < 0) { std::size_t abs_n = -n; ASIO_ASSERT(position_ >= abs_n && "iterator out of bounds"); for (;;) { // Check if the advance can be satisfied by the current buffer. if (current_buffer_position_ >= abs_n) { position_ -= abs_n; current_buffer_position_ -= abs_n; return; } // Update position. abs_n -= current_buffer_position_; position_ -= current_buffer_position_; // Check if we've reached the beginning of the buffers. if (current_ == begin_) { ASIO_ASSERT(abs_n == 0 && "iterator out of bounds"); current_buffer_position_ = 0; return; } // Find the previous non-empty buffer. buffer_sequence_iterator_type iter = current_; while (iter != begin_) { --iter; buffer_type buffer = *iter; std::size_t buffer_size = buffer.size(); if (buffer_size > 0) { current_ = iter; current_buffer_ = buffer; current_buffer_position_ = buffer_size; break; } } } } } // Determine the distance between two iterators. 
std::ptrdiff_t distance_to(const buffers_iterator& other) const { return other.position_ - position_; } buffer_type current_buffer_; std::size_t current_buffer_position_; buffer_sequence_iterator_type begin_; buffer_sequence_iterator_type current_; buffer_sequence_iterator_type end_; std::size_t position_; }; /// Construct an iterator representing the beginning of the buffers' data. template inline buffers_iterator buffers_begin( const BufferSequence& buffers) { return buffers_iterator::begin(buffers); } /// Construct an iterator representing the end of the buffers' data. template inline buffers_iterator buffers_end( const BufferSequence& buffers) { return buffers_iterator::end(buffers); } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFERS_ITERATOR_HPP galera-4-26.4.25/asio/asio/this_coro.hpp000644 000164 177776 00000002040 15107057155 021100 0ustar00jenkinsnogroup000000 000000 // // this_coro.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_THIS_CORO_HPP #define ASIO_THIS_CORO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace this_coro { /// Awaitable type that returns the executor of the current coroutine. struct executor_t { ASIO_CONSTEXPR executor_t() { } }; /// Awaitable object that returns the executor of the current coroutine. 
#if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION) constexpr executor_t executor; #elif defined(ASIO_MSVC) __declspec(selectany) executor_t executor; #endif } // namespace this_coro } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_THIS_CORO_HPP galera-4-26.4.25/asio/asio/system_executor.hpp000644 000164 177776 00000010245 15107057155 022357 0ustar00jenkinsnogroup000000 000000 // // system_executor.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SYSTEM_EXECUTOR_HPP #define ASIO_SYSTEM_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { class system_context; /// An executor that uses arbitrary threads. /** * The system executor represents an execution context where functions are * permitted to run on arbitrary threads. The post() and defer() functions * schedule the function to run on an unspecified system thread pool, and * dispatch() invokes the function immediately. */ class system_executor { public: /// Obtain the underlying execution context. system_context& context() const ASIO_NOEXCEPT; /// Inform the executor that it has some outstanding work to do. /** * For the system executor, this is a no-op. */ void on_work_started() const ASIO_NOEXCEPT { } /// Inform the executor that some work is no longer outstanding. /** * For the system executor, this is a no-op. */ void on_work_finished() const ASIO_NOEXCEPT { } /// Request the system executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will always be executed inside this function. 
* * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the system executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled to run on an unspecified system thread pool. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the system executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled to run on an unspecified system thread pool. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Compare two executors for equality. /** * System executors always compare equal. 
*/ friend bool operator==(const system_executor&, const system_executor&) ASIO_NOEXCEPT { return true; } /// Compare two executors for inequality. /** * System executors always compare equal. */ friend bool operator!=(const system_executor&, const system_executor&) ASIO_NOEXCEPT { return false; } }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/system_executor.hpp" #endif // ASIO_SYSTEM_EXECUTOR_HPP galera-4-26.4.25/asio/asio/io_context.hpp000644 000164 177776 00000076754 15107057155 021311 0ustar00jenkinsnogroup000000 000000 // // io_context.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_CONTEXT_HPP #define ASIO_IO_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/async_result.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/error_code.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_CHRONO) # include "asio/detail/chrono.hpp" #endif // defined(ASIO_HAS_CHRONO) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/winsock_init.hpp" #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) # include "asio/detail/signal_init.hpp" #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context io_context_impl; class win_iocp_overlapped_ptr; #else typedef class scheduler io_context_impl; #endif } // namespace detail /// Provides core I/O functionality. 
/** * The io_context class provides the core I/O functionality for users of the * asynchronous I/O objects, including: * * @li asio::ip::tcp::socket * @li asio::ip::tcp::acceptor * @li asio::ip::udp::socket * @li asio::deadline_timer. * * The io_context class also includes facilities intended for developers of * custom asynchronous services. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe, with the specific exceptions of the restart() * and notify_fork() functions. Calling restart() while there are unfinished * run(), run_one(), run_for(), run_until(), poll() or poll_one() calls results * in undefined behaviour. The notify_fork() function should not be called * while any io_context function, or any function on an I/O object that is * associated with the io_context, is being called in another thread. * * @par Concepts: * Dispatcher. * * @par Synchronous and asynchronous operations * * Synchronous operations on I/O objects implicitly run the io_context object * for an individual operation. The io_context functions run(), run_one(), * run_for(), run_until(), poll() or poll_one() must be called for the * io_context to perform asynchronous operations on behalf of a C++ program. * Notification that an asynchronous operation has completed is delivered by * invocation of the associated handler. Handlers are invoked only by a thread * that is currently calling any overload of run(), run_one(), run_for(), * run_until(), poll() or poll_one() for the io_context. * * @par Effect of exceptions thrown from handlers * * If an exception is thrown from a handler, the exception is allowed to * propagate through the throwing thread's invocation of run(), run_one(), * run_for(), run_until(), poll() or poll_one(). No other threads that are * calling any of these functions are affected. It is then the responsibility * of the application to catch the exception. 
* * After the exception has been caught, the run(), run_one(), run_for(), * run_until(), poll() or poll_one() call may be restarted @em without the need * for an intervening call to restart(). This allows the thread to rejoin the * io_context object's thread pool without impacting any other threads in the * pool. * * For example: * * @code * asio::io_context io_context; * ... * for (;;) * { * try * { * io_context.run(); * break; // run() exited normally * } * catch (my_exception& e) * { * // Deal with exception as appropriate. * } * } * @endcode * * @par Submitting arbitrary tasks to the io_context * * To submit functions to the io_context, use the @ref asio::dispatch, * @ref asio::post or @ref asio::defer free functions. * * For example: * * @code void my_task() * { * ... * } * * ... * * asio::io_context io_context; * * // Submit a function to the io_context. * asio::post(io_context, my_task); * * // Submit a lambda object to the io_context. * asio::post(io_context, * []() * { * ... * }); * * // Run the io_context until it runs out of work. * io_context.run(); @endcode * * @par Stopping the io_context from running out of work * * Some applications may need to prevent an io_context object's run() call from * returning when there is no more work to do. For example, the io_context may * be being run in a background thread that is launched prior to the * application's asynchronous operations. The run() call may be kept running by * creating an object of type * asio::executor_work_guard: * * @code asio::io_context io_context; * asio::executor_work_guard * = asio::make_work_guard(io_context); * ... @endcode * * To effect a shutdown, the application will then need to call the io_context * object's stop() member function. This will cause the io_context run() call * to return as soon as possible, abandoning unfinished operations and without * permitting ready handlers to be dispatched. 
* * Alternatively, if the application requires that all operations and handlers * be allowed to finish normally, the work object may be explicitly reset. * * @code asio::io_context io_context; * asio::executor_work_guard * = asio::make_work_guard(io_context); * ... * work.reset(); // Allow run() to exit. @endcode */ class io_context : public execution_context { private: typedef detail::io_context_impl impl_type; #if defined(ASIO_HAS_IOCP) friend class detail::win_iocp_overlapped_ptr; #endif public: class executor_type; friend class executor_type; #if !defined(ASIO_NO_DEPRECATED) class work; friend class work; #endif // !defined(ASIO_NO_DEPRECATED) class service; #if !defined(ASIO_NO_EXTENSIONS) class strand; #endif // !defined(ASIO_NO_EXTENSIONS) /// The type used to count the number of handlers executed by the context. typedef std::size_t count_type; /// Constructor. ASIO_DECL io_context(); /// Constructor. /** * Construct with a hint about the required level of concurrency. * * @param concurrency_hint A suggestion to the implementation on how many * threads it should allow to run simultaneously. */ ASIO_DECL explicit io_context(int concurrency_hint); /// Destructor. /** * On destruction, the io_context performs the following sequence of * operations: * * @li For each service object @c svc in the io_context set, in reverse order * of the beginning of service object lifetime, performs * @c svc->shutdown(). * * @li Uninvoked handler objects that were scheduled for deferred invocation * on the io_context, or any associated strand, are destroyed. * * @li For each service object @c svc in the io_context set, in reverse order * of the beginning of service object lifetime, performs * delete static_cast(svc). * * @note The destruction sequence described above permits programs to * simplify their resource management by using @c shared_ptr<>. 
Where an * object's lifetime is tied to the lifetime of a connection (or some other * sequence of asynchronous operations), a @c shared_ptr to the object would * be bound into the handlers for all asynchronous operations associated with * it. This works as follows: * * @li When a single connection ends, all associated asynchronous operations * complete. The corresponding handler objects are destroyed, and all * @c shared_ptr references to the objects are destroyed. * * @li To shut down the whole program, the io_context function stop() is * called to terminate any run() calls as soon as possible. The io_context * destructor defined above destroys all handlers, causing all @c shared_ptr * references to all connection objects to be destroyed. */ ASIO_DECL ~io_context(); /// Obtains the executor associated with the io_context. executor_type get_executor() ASIO_NOEXCEPT; /// Run the io_context object's event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_context has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_context may execute handlers. All threads that are * waiting in the pool are equivalent and the io_context may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_context object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to restart(). * * @return The number of handlers that were executed. * * @note Calling the run() function from a thread that is currently calling * one of run(), run_one(), run_for(), run_until(), poll() or poll_one() on * the same io_context object may introduce the potential for deadlock. It is * the caller's reponsibility to avoid this. 
* * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL count_type run(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Run the io_context object's /// event processing loop. /** * The run() function blocks until all work has finished and there are no * more handlers to be dispatched, or until the io_context has been stopped. * * Multiple threads may call the run() function to set up a pool of threads * from which the io_context may execute handlers. All threads that are * waiting in the pool are equivalent and the io_context may choose any one * of them to invoke a handler. * * A normal exit from the run() function implies that the io_context object * is stopped (the stopped() function returns @c true). Subsequent calls to * run(), run_one(), poll() or poll_one() will return immediately unless there * is a prior call to restart(). * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. * * @note Calling the run() function from a thread that is currently calling * one of run(), run_one(), run_for(), run_until(), poll() or poll_one() on * the same io_context object may introduce the potential for deadlock. It is * the caller's reponsibility to avoid this. * * The poll() function may also be used to dispatch ready handlers, but * without blocking. */ ASIO_DECL count_type run(asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) #if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) /// Run the io_context object's event processing loop for a specified /// duration. /** * The run_for() function blocks until all work has finished and there are no * more handlers to be dispatched, until the io_context has been stopped, or * until the specified duration has elapsed. * * @param rel_time The duration for which the call may block. * * @return The number of handlers that were executed. 
*/ template std::size_t run_for(const chrono::duration& rel_time); /// Run the io_context object's event processing loop until a specified time. /** * The run_until() function blocks until all work has finished and there are * no more handlers to be dispatched, until the io_context has been stopped, * or until the specified time has been reached. * * @param abs_time The time point until which the call may block. * * @return The number of handlers that were executed. */ template std::size_t run_until(const chrono::time_point& abs_time); #endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) /// Run the io_context object's event processing loop to execute at most one /// handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_context has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_context object is stopped (the stopped() function * returns @c true). Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * restart(). * * @note Calling the run_one() function from a thread that is currently * calling one of run(), run_one(), run_for(), run_until(), poll() or * poll_one() on the same io_context object may introduce the potential for * deadlock. It is the caller's reponsibility to avoid this. */ ASIO_DECL count_type run_one(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overlaod.) Run the io_context object's /// event processing loop to execute at most one handler. /** * The run_one() function blocks until one handler has been dispatched, or * until the io_context has been stopped. * * @return The number of handlers that were executed. A zero return value * implies that the io_context object is stopped (the stopped() function * returns @c true). 
Subsequent calls to run(), run_one(), poll() or * poll_one() will return immediately unless there is a prior call to * restart(). * * @return The number of handlers that were executed. * * @note Calling the run_one() function from a thread that is currently * calling one of run(), run_one(), run_for(), run_until(), poll() or * poll_one() on the same io_context object may introduce the potential for * deadlock. It is the caller's reponsibility to avoid this. */ ASIO_DECL count_type run_one(asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) #if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) /// Run the io_context object's event processing loop for a specified duration /// to execute at most one handler. /** * The run_one_for() function blocks until one handler has been dispatched, * until the io_context has been stopped, or until the specified duration has * elapsed. * * @param rel_time The duration for which the call may block. * * @return The number of handlers that were executed. */ template std::size_t run_one_for(const chrono::duration& rel_time); /// Run the io_context object's event processing loop until a specified time /// to execute at most one handler. /** * The run_one_until() function blocks until one handler has been dispatched, * until the io_context has been stopped, or until the specified time has * been reached. * * @param abs_time The time point until which the call may block. * * @return The number of handlers that were executed. */ template std::size_t run_one_until( const chrono::time_point& abs_time); #endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) /// Run the io_context object's event processing loop to execute ready /// handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_context has been stopped or there are no more ready handlers. * * @return The number of handlers that were executed. 
*/ ASIO_DECL count_type poll(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Run the io_context object's /// event processing loop to execute ready handlers. /** * The poll() function runs handlers that are ready to run, without blocking, * until the io_context has been stopped or there are no more ready handlers. * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL count_type poll(asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) /// Run the io_context object's event processing loop to execute one ready /// handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. * * @return The number of handlers that were executed. */ ASIO_DECL count_type poll_one(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Run the io_context object's /// event processing loop to execute one ready handler. /** * The poll_one() function runs at most one handler that is ready to run, * without blocking. * * @param ec Set to indicate what error occurred, if any. * * @return The number of handlers that were executed. */ ASIO_DECL count_type poll_one(asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) /// Stop the io_context object's event processing loop. /** * This function does not block, but instead simply signals the io_context to * stop. All invocations of its run() or run_one() member functions should * return as soon as possible. Subsequent calls to run(), run_one(), poll() * or poll_one() will return immediately until restart() is called. */ ASIO_DECL void stop(); /// Determine whether the io_context object has been stopped. /** * This function is used to determine whether an io_context object has been * stopped, either through an explicit call to stop(), or due to running out * of work. 
When an io_context object is stopped, calls to run(), run_one(), * poll() or poll_one() will return immediately without invoking any * handlers. * * @return @c true if the io_context object is stopped, otherwise @c false. */ ASIO_DECL bool stopped() const; /// Restart the io_context in preparation for a subsequent run() invocation. /** * This function must be called prior to any second or later set of * invocations of the run(), run_one(), poll() or poll_one() functions when a * previous invocation of these functions returned due to the io_context * being stopped or running out of work. After a call to restart(), the * io_context object's stopped() function will return @c false. * * This function must not be called while there are any unfinished calls to * the run(), run_one(), poll() or poll_one() functions. */ ASIO_DECL void restart(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use restart().) Reset the io_context in preparation for a /// subsequent run() invocation. /** * This function must be called prior to any second or later set of * invocations of the run(), run_one(), poll() or poll_one() functions when a * previous invocation of these functions returned due to the io_context * being stopped or running out of work. After a call to restart(), the * io_context object's stopped() function will return @c false. * * This function must not be called while there are any unfinished calls to * the run(), run_one(), poll() or poll_one() functions. */ void reset(); /// (Deprecated: Use asio::dispatch().) Request the io_context to /// invoke the given handler. /** * This function is used to ask the io_context to execute the given handler. * * The io_context guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. The handler may be executed inside this function * if the guarantee can be met. * * @param handler The handler to be called. 
The io_context will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) dispatch(ASIO_MOVE_ARG(LegacyCompletionHandler) handler); /// (Deprecated: Use asio::post().) Request the io_context to invoke /// the given handler and return immediately. /** * This function is used to ask the io_context to execute the given handler, * but without allowing the io_context to call the handler from inside this * function. * * The io_context guarantees that the handler will only be called in a thread * in which the run(), run_one(), poll() or poll_one() member functions is * currently being invoked. * * @param handler The handler to be called. The io_context will make * a copy of the handler object as required. The function signature of the * handler must be: @code void handler(); @endcode * * @note This function throws an exception only if: * * @li the handler's @c asio_handler_allocate function; or * * @li the handler's copy constructor * * throws an exception. */ template ASIO_INITFN_RESULT_TYPE(LegacyCompletionHandler, void ()) post(ASIO_MOVE_ARG(LegacyCompletionHandler) handler); /// (Deprecated: Use asio::bind_executor().) Create a new handler that /// automatically dispatches the wrapped handler on the io_context. /** * This function is used to create a new handler function object that, when * invoked, will automatically pass the wrapped handler to the io_context * object's dispatch function. * * @param handler The handler to be wrapped. The io_context will make a copy * of the handler object as required. The function signature of the handler * must be: @code void handler(A1 a1, ... 
An an); @endcode * * @return A function object that, when invoked, passes the wrapped handler to * the io_context object's dispatch function. Given a function object with the * signature: * @code R f(A1 a1, ... An an); @endcode * If this function object is passed to the wrap function like so: * @code io_context.wrap(f); @endcode * then the return value is a function object with the signature * @code void g(A1 a1, ... An an); @endcode * that, when invoked, executes code equivalent to: * @code io_context.dispatch(boost::bind(f, a1, ... an)); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else detail::wrapped_handler #endif wrap(Handler handler); #endif // !defined(ASIO_NO_DEPRECATED) private: #if !defined(ASIO_NO_DEPRECATED) struct initiate_dispatch; struct initiate_post; #endif // !defined(ASIO_NO_DEPRECATED) // Helper function to add the implementation. ASIO_DECL impl_type& add_impl(impl_type* impl); // Backwards compatible overload for use with services derived from // io_context::service. template friend Service& use_service(io_context& ioc); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) detail::winsock_init<> init_; #elif defined(__sun) || defined(__QNX__) || defined(__hpux) || defined(_AIX) \ || defined(__osf__) detail::signal_init<> init_; #endif // The implementation. impl_type& impl_; }; /// Executor used to submit functions to an io_context. class io_context::executor_type { public: /// Obtain the underlying execution context. io_context& context() const ASIO_NOEXCEPT; /// Inform the io_context that it has some outstanding work to do. /** * This function is used to inform the io_context that some work has begun. * This ensures that the io_context's run() and run_one() functions do not * exit while the work is underway. */ void on_work_started() const ASIO_NOEXCEPT; /// Inform the io_context that some work is no longer outstanding. /** * This function is used to inform the io_context that some work has * finished. 
Once the count of unfinished work reaches zero, the io_context * is stopped and the run() and run_one() functions may exit. */ void on_work_finished() const ASIO_NOEXCEPT; /// Request the io_context to invoke the given function object. /** * This function is used to ask the io_context to execute the given function * object. If the current thread is running the io_context, @c dispatch() * executes the function before returning. Otherwise, the function will be * scheduled to run on the io_context. * * @param f The function object to be called. The executor will make a copy * of the handler object as required. The function signature of the function * object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the io_context to invoke the given function object. /** * This function is used to ask the io_context to execute the given function * object. The function object will never be executed inside @c post(). * Instead, it will be scheduled to run on the io_context. * * @param f The function object to be called. The executor will make a copy * of the handler object as required. The function signature of the function * object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the io_context to invoke the given function object. /** * This function is used to ask the io_context to execute the given function * object. The function object will never be executed inside @c defer(). * Instead, it will be scheduled to run on the io_context. 
* * If the current thread belongs to the io_context, @c defer() will delay * scheduling the function object until the current thread returns control to * the pool. * * @param f The function object to be called. The executor will make a copy * of the handler object as required. The function signature of the function * object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Determine whether the io_context is running in the current thread. /** * @return @c true if the current thread is running the io_context. Otherwise * returns @c false. */ bool running_in_this_thread() const ASIO_NOEXCEPT; /// Compare two executors for equality. /** * Two executors are equal if they refer to the same underlying io_context. */ friend bool operator==(const executor_type& a, const executor_type& b) ASIO_NOEXCEPT { return &a.io_context_ == &b.io_context_; } /// Compare two executors for inequality. /** * Two executors are equal if they refer to the same underlying io_context. */ friend bool operator!=(const executor_type& a, const executor_type& b) ASIO_NOEXCEPT { return &a.io_context_ != &b.io_context_; } private: friend class io_context; // Constructor. explicit executor_type(io_context& i) : io_context_(i) {} // The underlying io_context. io_context& io_context_; }; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use executor_work_guard.) Class to inform the io_context when /// it has work to do. /** * The work class is used to inform the io_context when work starts and * finishes. This ensures that the io_context object's run() function will not * exit while work is underway, and that it does exit when there is no * unfinished work remaining. * * The work class is copy-constructible so that it may be used as a data member * in a handler class. It is not assignable. 
*/ class io_context::work { public: /// Constructor notifies the io_context that work is starting. /** * The constructor is used to inform the io_context that some work has begun. * This ensures that the io_context object's run() function will not exit * while the work is underway. */ explicit work(asio::io_context& io_context); /// Copy constructor notifies the io_context that work is starting. /** * The constructor is used to inform the io_context that some work has begun. * This ensures that the io_context object's run() function will not exit * while the work is underway. */ work(const work& other); /// Destructor notifies the io_context that the work is complete. /** * The destructor is used to inform the io_context that some work has * finished. Once the count of unfinished work reaches zero, the io_context * object's run() function is permitted to exit. */ ~work(); /// Get the io_context associated with the work. asio::io_context& get_io_context(); private: // Prevent assignment. void operator=(const work& other); // The io_context implementation. detail::io_context_impl& io_context_impl_; }; #endif // !defined(ASIO_NO_DEPRECATED) /// Base class for all io_context services. class io_context::service : public execution_context::service { public: /// Get the io_context object that owns the service. asio::io_context& get_io_context(); private: /// Destroy all user-defined handler objects owned by the service. ASIO_DECL virtual void shutdown(); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use shutdown().) Destroy all user-defined handler objects /// owned by the service. ASIO_DECL virtual void shutdown_service(); #endif // !defined(ASIO_NO_DEPRECATED) /// Handle notification of a fork-related event to perform any necessary /// housekeeping. /** * This function is not a pure virtual so that services only have to * implement it if necessary. The default implementation does nothing. 
*/ ASIO_DECL virtual void notify_fork( execution_context::fork_event event); #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use notify_fork().) Handle notification of a fork-related /// event to perform any necessary housekeeping. /** * This function is not a pure virtual so that services only have to * implement it if necessary. The default implementation does nothing. */ ASIO_DECL virtual void fork_service( execution_context::fork_event event); #endif // !defined(ASIO_NO_DEPRECATED) protected: /// Constructor. /** * @param owner The io_context object that owns the service. */ ASIO_DECL service(asio::io_context& owner); /// Destructor. ASIO_DECL virtual ~service(); }; namespace detail { // Special service base class to keep classes header-file only. template class service_base : public asio::io_context::service { public: static asio::detail::service_id id; // Constructor. service_base(asio::io_context& io_context) : asio::io_context::service(io_context) { } }; template asio::detail::service_id service_base::id; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/io_context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/io_context.ipp" #endif // defined(ASIO_HEADER_ONLY) // If both io_context.hpp and strand.hpp have been included, automatically // include the header file needed for the io_context::strand class. #if !defined(ASIO_NO_EXTENSIONS) # if defined(ASIO_STRAND_HPP) # include "asio/io_context_strand.hpp" # endif // defined(ASIO_STRAND_HPP) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // ASIO_IO_CONTEXT_HPP galera-4-26.4.25/asio/asio/serial_port_base.hpp000644 000164 177776 00000010642 15107057155 022433 0ustar00jenkinsnogroup000000 000000 // // serial_port_base.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. 
(info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SERIAL_PORT_BASE_HPP #define ASIO_SERIAL_PORT_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) # include #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/socket_types.hpp" #include "asio/error_code.hpp" #if defined(GENERATING_DOCUMENTATION) # define ASIO_OPTION_STORAGE implementation_defined #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # define ASIO_OPTION_STORAGE DCB #else # define ASIO_OPTION_STORAGE termios #endif #include "asio/detail/push_options.hpp" namespace asio { /// The serial_port_base class is used as a base for the basic_serial_port class /// template so that we have a common place to define the serial port options. class serial_port_base { public: /// Serial port option to permit changing the baud rate. /** * Implements changing the baud rate for a given serial port. */ class baud_rate { public: explicit baud_rate(unsigned int rate = 0); unsigned int value() const; ASIO_DECL ASIO_SYNC_OP_VOID store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const; ASIO_DECL ASIO_SYNC_OP_VOID load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec); private: unsigned int value_; }; /// Serial port option to permit changing the flow control. /** * Implements changing the flow control for a given serial port. 
*/ class flow_control { public: enum type { none, software, hardware }; ASIO_DECL explicit flow_control(type t = none); type value() const; ASIO_DECL ASIO_SYNC_OP_VOID store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const; ASIO_DECL ASIO_SYNC_OP_VOID load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec); private: type value_; }; /// Serial port option to permit changing the parity. /** * Implements changing the parity for a given serial port. */ class parity { public: enum type { none, odd, even }; ASIO_DECL explicit parity(type t = none); type value() const; ASIO_DECL ASIO_SYNC_OP_VOID store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const; ASIO_DECL ASIO_SYNC_OP_VOID load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec); private: type value_; }; /// Serial port option to permit changing the number of stop bits. /** * Implements changing the number of stop bits for a given serial port. */ class stop_bits { public: enum type { one, onepointfive, two }; ASIO_DECL explicit stop_bits(type t = one); type value() const; ASIO_DECL ASIO_SYNC_OP_VOID store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const; ASIO_DECL ASIO_SYNC_OP_VOID load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec); private: type value_; }; /// Serial port option to permit changing the character size. /** * Implements changing the character size for a given serial port. */ class character_size { public: ASIO_DECL explicit character_size(unsigned int t = 8); unsigned int value() const; ASIO_DECL ASIO_SYNC_OP_VOID store( ASIO_OPTION_STORAGE& storage, asio::error_code& ec) const; ASIO_DECL ASIO_SYNC_OP_VOID load( const ASIO_OPTION_STORAGE& storage, asio::error_code& ec); private: unsigned int value_; }; protected: /// Protected destructor to prevent deletion through this type. 
~serial_port_base() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #undef ASIO_OPTION_STORAGE #include "asio/impl/serial_port_base.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/serial_port_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SERIAL_PORT_BASE_HPP galera-4-26.4.25/asio/asio/buffered_read_stream_fwd.hpp000644 000164 177776 00000001202 15107057155 024076 0ustar00jenkinsnogroup000000 000000 // // buffered_read_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_READ_STREAM_FWD_HPP #define ASIO_BUFFERED_READ_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_read_stream; } // namespace asio #endif // ASIO_BUFFERED_READ_STREAM_FWD_HPP galera-4-26.4.25/asio/asio/generic/000755 000164 177776 00000000000 15107057160 020012 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/generic/raw_protocol.hpp000644 000164 177776 00000005720 15107057155 023245 0ustar00jenkinsnogroup000000 000000 // // generic/raw_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_RAW_PROTOCOL_HPP #define ASIO_GENERIC_RAW_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_raw_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic raw socket. /** * The asio::generic::raw_protocol class contains flags necessary for * raw sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code raw_protocol p(AF_INET, IPPROTO_ICMP); @endcode * Constructing from a specific protocol type: * @code raw_protocol p(asio::ip::icmp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class raw_protocol { public: /// Construct a protocol object for a specific address family and protocol. raw_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not raw-oriented. */ template raw_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_RAW); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. 
int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const raw_protocol& p1, const raw_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const raw_protocol& p1, const raw_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_raw_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_RAW_PROTOCOL_HPP galera-4-26.4.25/asio/asio/generic/detail/000755 000164 177776 00000000000 15107057160 021254 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/generic/detail/impl/000755 000164 177776 00000000000 15107057160 022215 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/generic/detail/impl/endpoint.ipp000644 000164 177776 00000005162 15107057155 024557 0ustar00jenkinsnogroup000000 000000 // // generic/detail/impl/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/generic/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { namespace detail { endpoint::endpoint() { init(0, 0, 0); } endpoint::endpoint(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol) { init(sock_addr, sock_addr_size, sock_protocol); } void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } else { size_ = new_size; protocol_ = 0; } } bool operator==(const endpoint& e1, const endpoint& e2) { using namespace std; // For memcmp. return e1.size() == e2.size() && memcmp(e1.data(), e2.data(), e1.size()) == 0; } bool operator<(const endpoint& e1, const endpoint& e2) { if (e1.protocol() < e2.protocol()) return true; if (e1.protocol() > e2.protocol()) return false; using namespace std; // For memcmp. std::size_t compare_size = e1.size() < e2.size() ? e1.size() : e2.size(); int compare_result = memcmp(e1.data(), e2.data(), compare_size); if (compare_result < 0) return true; if (compare_result > 0) return false; return e1.size() < e2.size(); } void endpoint::init(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol) { if (sock_addr_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } using namespace std; // For memset and memcpy. 
memset(&data_.generic, 0, sizeof(asio::detail::sockaddr_storage_type)); if (sock_addr_size > 0) memcpy(&data_.generic, sock_addr, sock_addr_size); size_ = sock_addr_size; protocol_ = sock_protocol; } } // namespace detail } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_DETAIL_IMPL_ENDPOINT_IPP galera-4-26.4.25/asio/asio/generic/detail/endpoint.hpp000644 000164 177776 00000006165 15107057155 023621 0ustar00jenkinsnogroup000000 000000 // // generic/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DETAIL_ENDPOINT_HPP #define ASIO_GENERIC_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { namespace detail { // Helper class for implementing a generic socket endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint(); // Construct an endpoint from the specified raw bytes. ASIO_DECL endpoint(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol); // Copy constructor. endpoint(const endpoint& other) : data_(other.data_), size_(other.size_), protocol_(other.protocol_) { } // Assign from another endpoint. endpoint& operator=(const endpoint& other) { data_ = other.data_; size_ = other.size_; protocol_ = other.protocol_; return *this; } // Get the address family associated with the endpoint. int family() const { return data_.base.sa_family; } // Get the socket protocol associated with the endpoint. int protocol() const { return protocol_; } // Get the underlying endpoint in the native type. 
asio::detail::socket_addr_type* data() { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const { return size_; } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const { return sizeof(asio::detail::sockaddr_storage_type); } // Compare two endpoints for equality. ASIO_DECL friend bool operator==( const endpoint& e1, const endpoint& e2); // Compare endpoints for ordering. ASIO_DECL friend bool operator<( const endpoint& e1, const endpoint& e2); private: // The underlying socket address. union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_storage_type generic; } data_; // The length of the socket address stored in the endpoint. std::size_t size_; // The socket protocol associated with the endpoint. int protocol_; // Initialise with a specified memory. ASIO_DECL void init(const void* sock_addr, std::size_t sock_addr_size, int sock_protocol); }; } // namespace detail } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/generic/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_GENERIC_DETAIL_ENDPOINT_HPP galera-4-26.4.25/asio/asio/generic/basic_endpoint.hpp000644 000164 177776 00000011132 15107057155 023506 0ustar00jenkinsnogroup000000 000000 // // generic/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_BASIC_ENDPOINT_HPP #define ASIO_GENERIC_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/generic/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Describes an endpoint for any socket type. /** * The asio::generic::basic_endpoint class template describes an endpoint * that may be associated with any socket type. * * @note The socket types sockaddr type must be able to fit into a * @c sockaddr_storage structure. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef Protocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. basic_endpoint() { } /// Construct an endpoint from the specified socket address. basic_endpoint(const void* socket_address, std::size_t socket_address_size, int socket_protocol = 0) : impl_(socket_address, socket_address_size, socket_protocol) { } /// Construct an endpoint from the specific endpoint type. template basic_endpoint(const Endpoint& endpoint) : impl_(endpoint.data(), endpoint.size(), endpoint.protocol().protocol()) { } /// Copy constructor. basic_endpoint(const basic_endpoint& other) : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_endpoint(basic_endpoint&& other) : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another endpoint. 
basic_endpoint& operator=(const basic_endpoint& other) { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another endpoint. basic_endpoint& operator=(basic_endpoint&& other) { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// The protocol associated with the endpoint. protocol_type protocol() const { return protocol_type(impl_.family(), impl_.protocol()); } /// Get the underlying endpoint in the native type. data_type* data() { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const { return impl_.capacity(); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1.impl_ == e2.impl_); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) { return !(e1 < e2); } private: // The underlying generic endpoint. 
asio::generic::detail::endpoint impl_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_BASIC_ENDPOINT_HPP galera-4-26.4.25/asio/asio/generic/seq_packet_protocol.hpp000644 000164 177776 00000006055 15107057155 024575 0ustar00jenkinsnogroup000000 000000 // // generic/seq_packet_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP #define ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_seq_packet_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic sequenced packet socket. /** * The asio::generic::seq_packet_protocol class contains flags necessary * for seq_packet-oriented sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code seq_packet_protocol p(AF_INET, IPPROTO_SCTP); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class seq_packet_protocol { public: /// Construct a protocol object for a specific address family and protocol. seq_packet_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not based around * sequenced packets. 
*/ template seq_packet_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_SEQPACKET); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const seq_packet_protocol& p1, const seq_packet_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const seq_packet_protocol& p1, const seq_packet_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_seq_packet_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_SEQ_PACKET_PROTOCOL_HPP galera-4-26.4.25/asio/asio/generic/stream_protocol.hpp000644 000164 177776 00000006367 15107057155 023757 0ustar00jenkinsnogroup000000 000000 // // generic/stream_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_STREAM_PROTOCOL_HPP #define ASIO_GENERIC_STREAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic stream-oriented socket. /** * The asio::generic::stream_protocol class contains flags necessary for * stream-oriented sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code stream_protocol p(AF_INET, IPPROTO_TCP); @endcode * Constructing from a specific protocol type: * @code stream_protocol p(asio::ip::tcp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class stream_protocol { public: /// Construct a protocol object for a specific address family and protocol. stream_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not stream-oriented. */ template stream_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_STREAM); } /// Obtain an identifier for the protocol. 
int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const stream_protocol& p1, const stream_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const stream_protocol& p1, const stream_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. typedef basic_stream_socket socket; #if !defined(ASIO_NO_IOSTREAM) /// The generic socket iostream type. typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_STREAM_PROTOCOL_HPP galera-4-26.4.25/asio/asio/generic/datagram_protocol.hpp000644 000164 177776 00000006134 15107057155 024234 0ustar00jenkinsnogroup000000 000000 // // generic/datagram_protocol.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP #define ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace generic { /// Encapsulates the flags needed for a generic datagram-oriented socket. 
/** * The asio::generic::datagram_protocol class contains flags necessary * for datagram-oriented sockets of any address family and protocol. * * @par Examples * Constructing using a native address family and socket protocol: * @code datagram_protocol p(AF_INET, IPPROTO_UDP); @endcode * Constructing from a specific protocol type: * @code datagram_protocol p(asio::ip::udp::v4()); @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol. */ class datagram_protocol { public: /// Construct a protocol object for a specific address family and protocol. datagram_protocol(int address_family, int socket_protocol) : family_(address_family), protocol_(socket_protocol) { } /// Construct a generic protocol object from a specific protocol. /** * @throws @c bad_cast Thrown if the source protocol is not datagram-oriented. */ template datagram_protocol(const Protocol& source_protocol) : family_(source_protocol.family()), protocol_(source_protocol.protocol()) { if (source_protocol.type() != type()) { std::bad_cast ex; asio::detail::throw_exception(ex); } } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_DGRAM); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// Compare two protocols for equality. friend bool operator==(const datagram_protocol& p1, const datagram_protocol& p2) { return p1.family_ == p2.family_ && p1.protocol_ == p2.protocol_; } /// Compare two protocols for inequality. friend bool operator!=(const datagram_protocol& p1, const datagram_protocol& p2) { return !(p1 == p2); } /// The type of an endpoint. typedef basic_endpoint endpoint; /// The generic socket type. 
typedef basic_datagram_socket socket; private: int family_; int protocol_; }; } // namespace generic } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_GENERIC_DATAGRAM_PROTOCOL_HPP galera-4-26.4.25/asio/asio/use_future.hpp000644 000164 177776 00000011043 15107057155 021300 0ustar00jenkinsnogroup000000 000000 // // use_future.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_USE_FUTURE_HPP #define ASIO_USE_FUTURE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/future.hpp" #if defined(ASIO_HAS_STD_FUTURE_CLASS) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class packaged_token; template class packaged_handler; } // namespace detail /// Class used to specify that an asynchronous operation should return a future. /** * The use_future_t class is used to indicate that an asynchronous operation * should return a std::future object. A use_future_t object may be passed as a * handler to an asynchronous operation, typically using the special value @c * asio::use_future. For example: * * @code std::future my_future * = my_socket.async_read_some(my_buffer, asio::use_future); @endcode * * The initiating function (async_read_some in the above example) returns a * future that will receive the result of the operation. If the operation * completes with an error_code indicating failure, it is converted into a * system_error and passed back to the caller via the future. */ template > class use_future_t { public: /// The allocator type. 
The allocator is used when constructing the /// @c std::promise object for a given asynchronous operation. typedef Allocator allocator_type; /// Construct using default-constructed allocator. ASIO_CONSTEXPR use_future_t() { } /// Construct using specified allocator. explicit use_future_t(const Allocator& allocator) : allocator_(allocator) { } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use rebind().) Specify an alternate allocator. template use_future_t operator[](const OtherAllocator& allocator) const { return use_future_t(allocator); } #endif // !defined(ASIO_NO_DEPRECATED) /// Specify an alternate allocator. template use_future_t rebind(const OtherAllocator& allocator) const { return use_future_t(allocator); } /// Obtain allocator. allocator_type get_allocator() const { return allocator_; } /// Wrap a function object in a packaged task. /** * The @c package function is used to adapt a function object as a packaged * task. When this adapter is passed as a completion token to an asynchronous * operation, the result of the function object is retuned via a std::future. * * @par Example * * @code std::future fut = * my_socket.async_read_some(buffer, * use_future([](asio::error_code ec, std::size_t n) * { * return ec ? 0 : n; * })); * ... * std::size_t n = fut.get(); @endcode */ template #if defined(GENERATING_DOCUMENTATION) unspecified #else // defined(GENERATING_DOCUMENTATION) detail::packaged_token::type, Allocator> #endif // defined(GENERATING_DOCUMENTATION) operator()(ASIO_MOVE_ARG(Function) f) const; private: // Helper type to ensure that use_future can be constexpr default-constructed // even when std::allocator can't be. struct std_allocator_void { ASIO_CONSTEXPR std_allocator_void() { } operator std::allocator() const { return std::allocator(); } }; typename conditional< is_same, Allocator>::value, std_allocator_void, Allocator>::type allocator_; }; /// A special value, similar to std::nothrow. 
/** * See the documentation for asio::use_future_t for a usage example. */ #if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION) constexpr use_future_t<> use_future; #elif defined(ASIO_MSVC) __declspec(selectany) use_future_t<> use_future; #endif } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/use_future.hpp" #endif // defined(ASIO_HAS_STD_FUTURE_CLASS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_USE_FUTURE_HPP galera-4-26.4.25/asio/asio/detached.hpp000644 000164 177776 00000002772 15107057155 020664 0ustar00jenkinsnogroup000000 000000 // // detached.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETACHED_HPP #define ASIO_DETACHED_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { /// Class used to specify that an asynchronous operation is detached. /** * The detached_t class is used to indicate that an asynchronous operation is * detached. That is, there is no completion handler waiting for the * operation's result. A detached_t object may be passed as a handler to an * asynchronous operation, typically using the special value * @c asio::detached. For example: * @code my_socket.async_send(my_buffer, asio::detached); * @endcode */ class detached_t { public: /// Constructor. ASIO_CONSTEXPR detached_t() { } }; /// A special value, similar to std::nothrow. /** * See the documentation for asio::detached_t for a usage example. 
*/ #if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION) constexpr detached_t detached; #elif defined(ASIO_MSVC) __declspec(selectany) detached_t detached; #endif } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/detached.hpp" #endif // ASIO_DETACHED_HPP galera-4-26.4.25/asio/asio/is_read_buffered.hpp000644 000164 177776 00000003021 15107057155 022357 0ustar00jenkinsnogroup000000 000000 // // is_read_buffered.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IS_READ_BUFFERED_HPP #define ASIO_IS_READ_BUFFERED_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffered_read_stream_fwd.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template char is_read_buffered_helper(buffered_stream* s); template char is_read_buffered_helper(buffered_read_stream* s); struct is_read_buffered_big_type { char data[10]; }; is_read_buffered_big_type is_read_buffered_helper(...); } // namespace detail /// The is_read_buffered class is a traits class that may be used to determine /// whether a stream type supports buffering of read data. template class is_read_buffered { public: #if defined(GENERATING_DOCUMENTATION) /// The value member is true only if the Stream type supports buffering of /// read data. 
static const bool value; #else ASIO_STATIC_CONSTANT(bool, value = sizeof(detail::is_read_buffered_helper((Stream*)0)) == 1); #endif }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IS_READ_BUFFERED_HPP galera-4-26.4.25/asio/asio/posix/000755 000164 177776 00000000000 15107057160 017540 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/posix/descriptor_base.hpp000644 000164 177776 00000004311 15107057155 023424 0ustar00jenkinsnogroup000000 000000 // // posix/descriptor_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POSIX_DESCRIPTOR_BASE_HPP #define ASIO_POSIX_DESCRIPTOR_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/io_control.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace posix { /// The descriptor_base class is used as a base for the descriptor class as a /// place to define the associated IO control commands. class descriptor_base { public: /// Wait types. /** * For use with descriptor::wait() and descriptor::async_wait(). */ enum wait_type { /// Wait for a descriptor to become ready to read. wait_read, /// Wait for a descriptor to become ready to write. wait_write, /// Wait for a descriptor to have error conditions pending. wait_error }; /// IO control command to get the amount of data that can be read without /// blocking. /** * Implements the FIONREAD IO control command. * * @par Example * @code * asio::posix::stream_descriptor descriptor(my_context); * ... 
* asio::descriptor_base::bytes_readable command(true); * descriptor.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode * * @par Concepts: * IoControlCommand. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined bytes_readable; #else typedef asio::detail::io_control::bytes_readable bytes_readable; #endif protected: /// Protected destructor to prevent deletion through this type. ~descriptor_base() { } }; } // namespace posix } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_POSIX_DESCRIPTOR_BASE_HPP galera-4-26.4.25/asio/asio/posix/descriptor.hpp000644 000164 177776 00000001663 15107057155 022441 0ustar00jenkinsnogroup000000 000000 // // posix/descriptor.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POSIX_DESCRIPTOR_HPP #define ASIO_POSIX_DESCRIPTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/posix/basic_descriptor.hpp" namespace asio { namespace posix { /// Typedef for the typical usage of basic_descriptor. typedef basic_descriptor<> descriptor; } // namespace posix } // namespace asio #endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_POSIX_DESCRIPTOR_HPP galera-4-26.4.25/asio/asio/posix/basic_descriptor.hpp000644 000164 177776 00000053541 15107057155 023604 0ustar00jenkinsnogroup000000 000000 // // posix/basic_descriptor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POSIX_BASIC_DESCRIPTOR_HPP #define ASIO_POSIX_BASIC_DESCRIPTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/posix/descriptor_base.hpp" #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { namespace posix { /// Provides POSIX descriptor functionality. /** * The posix::basic_descriptor class template provides the ability to wrap a * POSIX descriptor. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_descriptor : public descriptor_base { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a descriptor. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef detail::reactive_descriptor_service::native_handle_type native_handle_type; #endif /// A descriptor is always the lowest layer. typedef basic_descriptor lowest_layer_type; /// Construct a descriptor without opening it. /** * This constructor creates a descriptor without opening it. 
* * @param ex The I/O executor that the descriptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * descriptor. */ explicit basic_descriptor(const executor_type& ex) : impl_(ex) { } /// Construct a descriptor without opening it. /** * This constructor creates a descriptor without opening it. * * @param context An execution context which provides the I/O executor that * the descriptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the descriptor. */ template explicit basic_descriptor(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Construct a descriptor on an existing native descriptor. /** * This constructor creates a descriptor object to hold an existing native * descriptor. * * @param ex The I/O executor that the descriptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * descriptor. * * @param native_descriptor A native descriptor. * * @throws asio::system_error Thrown on failure. */ basic_descriptor(const executor_type& ex, const native_handle_type& native_descriptor) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_descriptor, ec); asio::detail::throw_error(ec, "assign"); } /// Construct a descriptor on an existing native descriptor. /** * This constructor creates a descriptor object to hold an existing native * descriptor. * * @param context An execution context which provides the I/O executor that * the descriptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the descriptor. * * @param native_descriptor A native descriptor. * * @throws asio::system_error Thrown on failure. 
*/ template basic_descriptor(ExecutionContext& context, const native_handle_type& native_descriptor, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_descriptor, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a descriptor from another. /** * This constructor moves a descriptor from one object to another. * * @param other The other descriptor object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_descriptor(const executor_type&) * constructor. */ basic_descriptor(basic_descriptor&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a descriptor from another. /** * This assignment operator moves a descriptor from one object to another. * * @param other The other descriptor object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_descriptor(const executor_type&) * constructor. */ basic_descriptor& operator=(basic_descriptor&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a descriptor cannot contain any further layers, it * simply returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. 
/** * This function returns a const reference to the lowest layer in a stack of * layers. Since a descriptor cannot contain any further layers, it * simply returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Assign an existing native descriptor to the descriptor. /* * This function opens the descriptor to hold an existing native descriptor. * * @param native_descriptor A native descriptor. * * @throws asio::system_error Thrown on failure. */ void assign(const native_handle_type& native_descriptor) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_descriptor, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native descriptor to the descriptor. /* * This function opens the descriptor to hold an existing native descriptor. * * @param native_descriptor A native descriptor. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const native_handle_type& native_descriptor, asio::error_code& ec) { impl_.get_service().assign( impl_.get_implementation(), native_descriptor, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the descriptor is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Close the descriptor. /** * This function is used to close the descriptor. Any asynchronous read or * write operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. Note that, even if * the function indicates an error, the underlying descriptor is closed. */ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the descriptor. /** * This function is used to close the descriptor. 
Any asynchronous read or * write operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. Note that, even if * the function indicates an error, the underlying descriptor is closed. */ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the native descriptor representation. /** * This function may be used to obtain the underlying representation of the * descriptor. This is intended to allow access to native descriptor * functionality that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Release ownership of the native descriptor implementation. /** * This function may be used to obtain the underlying representation of the * descriptor. After calling this function, @c is_open() returns false. The * caller is responsible for closing the descriptor. * * All outstanding asynchronous read or write operations will finish * immediately, and the handlers for cancelled operations will be passed the * asio::error::operation_aborted error. */ native_handle_type release() { return impl_.get_service().release(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the descriptor. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the descriptor. 
/** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform an IO control command on the descriptor. /** * This function is used to execute an IO control command on the descriptor. * * @param command The IO control command to be performed on the descriptor. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::posix::descriptor_base::bytes_readable @n * asio::posix::descriptor_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::posix::stream_descriptor descriptor(my_context); * ... * asio::posix::stream_descriptor::bytes_readable command; * descriptor.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode */ template void io_control(IoControlCommand& command) { asio::error_code ec; impl_.get_service().io_control(impl_.get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } /// Perform an IO control command on the descriptor. /** * This function is used to execute an IO control command on the descriptor. * * @param command The IO control command to be performed on the descriptor. * * @param ec Set to indicate what error occurred, if any. * * @sa IoControlCommand @n * asio::posix::descriptor_base::bytes_readable @n * asio::posix::descriptor_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::posix::stream_descriptor descriptor(my_context); * ... * asio::posix::stream_descriptor::bytes_readable command; * asio::error_code ec; * descriptor.io_control(command, ec); * if (ec) * { * // An error occurred. 
* } * std::size_t bytes_readable = command.get(); * @endcode */ template ASIO_SYNC_OP_VOID io_control(IoControlCommand& command, asio::error_code& ec) { impl_.get_service().io_control(impl_.get_implementation(), command, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the descriptor. /** * @returns @c true if the descriptor's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return impl_.get_service().non_blocking(impl_.get_implementation()); } /// Sets the non-blocking mode of the descriptor. /** * @param mode If @c true, the descriptor's synchronous operations will fail * with asio::error::would_block if they are unable to perform the * requested operation immediately. If @c false, synchronous operations will * block until complete. * * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ void non_blocking(bool mode) { asio::error_code ec; impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } /// Sets the non-blocking mode of the descriptor. /** * @param mode If @c true, the descriptor's synchronous operations will fail * with asio::error::would_block if they are unable to perform the * requested operation immediately. If @c false, synchronous operations will * block until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. 
Asynchronous operations will never fail with the error * asio::error::would_block. */ ASIO_SYNC_OP_VOID non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the native descriptor implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native descriptor. This mode has no effect on the behaviour of the * descriptor object's synchronous operations. * * @returns @c true if the underlying descriptor is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the descriptor object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native descriptor. */ bool native_non_blocking() const { return impl_.get_service().native_non_blocking( impl_.get_implementation()); } /// Sets the non-blocking mode of the native descriptor implementation. /** * This function is used to modify the non-blocking mode of the underlying * native descriptor. It has no effect on the behaviour of the descriptor * object's synchronous operations. * * @param mode If @c true, the underlying descriptor is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. */ void native_non_blocking(bool mode) { asio::error_code ec; impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } /// Sets the non-blocking mode of the native descriptor implementation. 
/** * This function is used to modify the non-blocking mode of the underlying * native descriptor. It has no effect on the behaviour of the descriptor * object's synchronous operations. * * @param mode If @c true, the underlying descriptor is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. */ ASIO_SYNC_OP_VOID native_non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Wait for the descriptor to become ready to read, ready to write, or to /// have pending error conditions. /** * This function is used to perform a blocking wait for a descriptor to enter * a ready to read, write or error condition state. * * @param w Specifies the desired descriptor state. * * @par Example * Waiting for a descriptor to become readable. * @code * asio::posix::stream_descriptor descriptor(my_context); * ... * descriptor.wait(asio::posix::stream_descriptor::wait_read); * @endcode */ void wait(wait_type w) { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), w, ec); asio::detail::throw_error(ec, "wait"); } /// Wait for the descriptor to become ready to read, ready to write, or to /// have pending error conditions. /** * This function is used to perform a blocking wait for a descriptor to enter * a ready to read, write or error condition state. * * @param w Specifies the desired descriptor state. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Waiting for a descriptor to become readable. * @code * asio::posix::stream_descriptor descriptor(my_context); * ... 
* asio::error_code ec; * descriptor.wait(asio::posix::stream_descriptor::wait_read, ec); * @endcode */ ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), w, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Asynchronously wait for the descriptor to become ready to read, ready to /// write, or to have pending error conditions. /** * This function is used to perform an asynchronous wait for a descriptor to * enter a ready to read, write or error condition state. * * @param w Specifies the desired descriptor state. * * @param handler The handler to be called when the wait operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void wait_handler(const asio::error_code& error) * { * if (!error) * { * // Wait succeeded. * } * } * * ... * * asio::posix::stream_descriptor descriptor(my_context); * ... * descriptor.async_wait( * asio::posix::stream_descriptor::wait_read, * wait_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(wait_type w, ASIO_MOVE_ARG(WaitHandler) handler) { return async_initiate( initiate_async_wait(), handler, this, w); } protected: /// Protected destructor to prevent deletion through this type. /** * This function destroys the descriptor, cancelling any outstanding * asynchronous wait operations associated with the descriptor as if by * calling @c cancel. */ ~basic_descriptor() { } detail::io_object_impl impl_; private: // Disallow copying and assignment. 
basic_descriptor(const basic_descriptor&) ASIO_DELETED; basic_descriptor& operator=(const basic_descriptor&) ASIO_DELETED; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(WaitHandler) handler, basic_descriptor* self, wait_type w) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WaitHandler. ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), w, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace posix } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_POSIX_BASIC_DESCRIPTOR_HPP galera-4-26.4.25/asio/asio/posix/basic_stream_descriptor.hpp000644 000164 177776 00000037406 15107057155 025161 0ustar00jenkinsnogroup000000 000000 // // posix/basic_stream_descriptor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP #define ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/posix/descriptor.hpp" #if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \ || defined(GENERATING_DOCUMENTATION) namespace asio { namespace posix { /// Provides stream-oriented descriptor functionality. /** * The posix::basic_stream_descriptor class template provides asynchronous and * blocking stream-oriented descriptor functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
* * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class basic_stream_descriptor : public basic_descriptor { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a descriptor. typedef typename basic_descriptor::native_handle_type native_handle_type; /// Construct a stream descriptor without opening it. /** * This constructor creates a stream descriptor without opening it. The * descriptor needs to be opened and then connected or accepted before data * can be sent or received on it. * * @param ex The I/O executor that the descriptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * descriptor. */ explicit basic_stream_descriptor(const executor_type& ex) : basic_descriptor(ex) { } /// Construct a stream descriptor without opening it. /** * This constructor creates a stream descriptor without opening it. The * descriptor needs to be opened and then connected or accepted before data * can be sent or received on it. * * @param context An execution context which provides the I/O executor that * the descriptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the descriptor. */ template explicit basic_stream_descriptor(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : basic_descriptor(context) { } /// Construct a stream descriptor on an existing native descriptor. /** * This constructor creates a stream descriptor object to hold an existing * native descriptor. * * @param ex The I/O executor that the descriptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * descriptor. * * @param native_descriptor The new underlying descriptor implementation. * * @throws asio::system_error Thrown on failure. 
*/ basic_stream_descriptor(const executor_type& ex, const native_handle_type& native_descriptor) : basic_descriptor(ex, native_descriptor) { } /// Construct a stream descriptor on an existing native descriptor. /** * This constructor creates a stream descriptor object to hold an existing * native descriptor. * * @param context An execution context which provides the I/O executor that * the descriptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the descriptor. * * @param native_descriptor The new underlying descriptor implementation. * * @throws asio::system_error Thrown on failure. */ template basic_stream_descriptor(ExecutionContext& context, const native_handle_type& native_descriptor, typename enable_if< is_convertible::value >::type* = 0) : basic_descriptor(context, native_descriptor) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a stream descriptor from another. /** * This constructor moves a stream descriptor from one object to another. * * @param other The other stream descriptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_descriptor(const executor_type&) * constructor. */ basic_stream_descriptor(basic_stream_descriptor&& other) : descriptor(std::move(other)) { } /// Move-assign a stream descriptor from another. /** * This assignment operator moves a stream descriptor from one object to * another. * * @param other The other stream descriptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_descriptor(const executor_type&) * constructor. 
*/ basic_stream_descriptor& operator=(basic_stream_descriptor&& other) { descriptor::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Write some data to the descriptor. /** * This function is used to write data to the stream descriptor. The function * call will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the descriptor. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * descriptor.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().write_some( this->impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the descriptor. /** * This function is used to write data to the stream descriptor. The function * call will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the descriptor. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. 
Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().write_some( this->impl_.get_implementation(), buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the stream * descriptor. The function call always returns immediately. * * @param buffers One or more data buffers to be written to the descriptor. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * descriptor.async_write_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_write_some(), handler, this, buffers); } /// Read some data from the descriptor. /** * This function is used to read data from the stream descriptor. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * descriptor.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().read_some( this->impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the descriptor. /** * This function is used to read data from the stream descriptor. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. 
* * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().read_some( this->impl_.get_implementation(), buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read data from the stream * descriptor. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * descriptor.async_read_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_read_some(), handler, this, buffers); } private: struct initiate_async_write_some { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_stream_descriptor* self, const ConstBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_write_some( self->impl_.get_implementation(), buffers, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_read_some { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_stream_descriptor* self, const MutableBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_read_some( self->impl_.get_implementation(), buffers, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace posix } // namespace asio #endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_POSIX_BASIC_STREAM_DESCRIPTOR_HPP galera-4-26.4.25/asio/asio/posix/stream_descriptor.hpp000644 000164 177776 00000001771 15107057155 024014 0ustar00jenkinsnogroup000000 000000 // // posix/stream_descriptor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POSIX_STREAM_DESCRIPTOR_HPP #define ASIO_POSIX_STREAM_DESCRIPTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/posix/basic_stream_descriptor.hpp" namespace asio { namespace posix { /// Typedef for the typical usage of a stream-oriented descriptor. typedef basic_stream_descriptor<> stream_descriptor; } // namespace posix } // namespace asio #endif // defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_POSIX_STREAM_DESCRIPTOR_HPP galera-4-26.4.25/asio/asio/streambuf.hpp000644 000164 177776 00000001351 15107057155 021103 0ustar00jenkinsnogroup000000 000000 // // streambuf.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STREAMBUF_HPP #define ASIO_STREAMBUF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include "asio/basic_streambuf.hpp" namespace asio { /// Typedef for the typical usage of basic_streambuf. typedef basic_streambuf<> streambuf; } // namespace asio #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_STREAMBUF_HPP galera-4-26.4.25/asio/asio/write.hpp000644 000164 177776 00000142540 15107057155 020253 0ustar00jenkinsnogroup000000 000000 // // write.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WRITE_HPP #define ASIO_WRITE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffer.hpp" #include "asio/error.hpp" #if !defined(ASIO_NO_EXTENSIONS) # include "asio/basic_streambuf_fwd.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup write asio::write * * @brief The @c write function is a composed operation that writes a certain * amount of data to a stream before returning. */ /*@{*/ /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, typename enable_if< is_const_buffer_sequence::value >::type* = 0); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. 
* * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, asio::error_code& ec, typename enable_if< is_const_buffer_sequence::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. 
* * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write(s, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, typename enable_if< is_const_buffer_sequence::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * stream. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. 
A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_const_buffer_sequence::value >::type* = 0); #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. 
* The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. 
* const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. 
* * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::write( * s, b, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. 
* * @param b The basic_streambuf object from which data will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::write( * s, b, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, asio::error_code& ec); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. 
* The call will block until one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. 
* * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Write all of the supplied data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::write( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. 
* The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Write a certain amount of data to a stream before returning. /** * This function is used to write a certain number of bytes of data to a stream. * The call will block until one of the following conditions is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * write_some function. * * @param s The stream to which the data is to be written. 
The type must support * the SyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Successfully written data is automatically consumed from the buffers. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's write_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write(SyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /*@}*/ /** * @defgroup async_write asio::async_write * * @brief The @c async_write function is a composed asynchronous operation that * writes a certain amount of data to a stream before completion. */ /*@{*/ /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * asio::async_write(s, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_const_buffer_sequence::value >::type* = 0); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::async_write(s, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, const ConstBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_const_buffer_sequence::value >::type* = 0); #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. 
The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. Successfully written * data is automatically consumed from the buffers. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. Successfully written * data is automatically consumed from the buffers. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. 
The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, basic_streambuf& b, ASIO_MOVE_ARG(WriteHandler) handler); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to write all of the supplied data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. Successfully written * data is automatically consumed from the buffers. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. 
If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Start an asynchronous operation to write a certain amount of data to a /// stream. /** * This function is used to asynchronously write a certain number of bytes of * data to a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li All of the data in the supplied dynamic buffer sequence has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_write_some function, and is known as a composed operation. The * program must ensure that the stream performs no other write operations (such * as async_write, the stream's async_write_some function, or any other composed * operations that perform writes) until this operation completes. * * @param s The stream to which the data is to be written. The type must support * the AsyncWriteStream concept. * * @param buffers The dynamic buffer sequence from which data will be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. Successfully written * data is automatically consumed from the buffers. 
* * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the stream's async_write_some function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes written from the * // buffers. If an error occurred, * // this will be less than the sum * // of the buffer sizes. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write(AsyncWriteStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/write.hpp" #endif // ASIO_WRITE_HPP galera-4-26.4.25/asio/asio/basic_stream_socket.hpp000644 000164 177776 00000113513 15107057155 023123 0ustar00jenkinsnogroup000000 000000 // // basic_stream_socket.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_STREAM_SOCKET_HPP #define ASIO_BASIC_STREAM_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL) #define ASIO_BASIC_STREAM_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. template class basic_stream_socket; #endif // !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL) /// Provides stream-oriented socket functionality. /** * The basic_stream_socket class template provides asynchronous and blocking * stream-oriented socket functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class basic_stream_socket : public basic_socket { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Rebinds the socket type to another executor. template struct rebind_executor { /// The socket type when rebound to the specified executor. typedef basic_stream_socket other; }; /// The native representation of a socket. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef typename basic_socket::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. 
typedef typename Protocol::endpoint endpoint_type; /// Construct a basic_stream_socket without opening it. /** * This constructor creates a stream socket without opening it. The socket * needs to be opened and then connected or accepted before data can be sent * or received on it. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_stream_socket(const executor_type& ex) : basic_socket(ex) { } /// Construct a basic_stream_socket without opening it. /** * This constructor creates a stream socket without opening it. The socket * needs to be opened and then connected or accepted before data can be sent * or received on it. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. */ template explicit basic_stream_socket(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context) { } /// Construct and open a basic_stream_socket. /** * This constructor creates and opens a stream socket. The socket needs to be * connected or accepted before data can be sent or received on it. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(const executor_type& ex, const protocol_type& protocol) : basic_socket(ex, protocol) { } /// Construct and open a basic_stream_socket. /** * This constructor creates and opens a stream socket. The socket needs to be * connected or accepted before data can be sent or received on it. 
* * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ template basic_stream_socket(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol) { } /// Construct a basic_stream_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a stream socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the stream * socket will be bound. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(const executor_type& ex, const endpoint_type& endpoint) : basic_socket(ex, endpoint) { } /// Construct a basic_stream_socket, opening it and binding it to the given /// local endpoint. /** * This constructor creates a stream socket and automatically opens it bound * to the specified endpoint on the local machine. The protocol used is the * protocol associated with the given endpoint. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the stream * socket will be bound. * * @throws asio::system_error Thrown on failure. 
*/ template basic_stream_socket(ExecutionContext& context, const endpoint_type& endpoint, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, endpoint) { } /// Construct a basic_stream_socket on an existing native socket. /** * This constructor creates a stream socket object to hold an existing native * socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ basic_stream_socket(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_socket) : basic_socket(ex, protocol, native_socket) { } /// Construct a basic_stream_socket on an existing native socket. /** * This constructor creates a stream socket object to hold an existing native * socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket The new underlying socket implementation. * * @throws asio::system_error Thrown on failure. */ template basic_stream_socket(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_socket, typename enable_if< is_convertible::value >::type* = 0) : basic_socket(context, protocol, native_socket) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_stream_socket from another. /** * This constructor moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(const executor_type&) * constructor. */ basic_stream_socket(basic_stream_socket&& other) : basic_socket(std::move(other)) { } /// Move-assign a basic_stream_socket from another. /** * This assignment operator moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(const executor_type&) * constructor. */ basic_stream_socket& operator=(basic_stream_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } /// Move-construct a basic_stream_socket from a socket of another protocol /// type. /** * This constructor moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(const executor_type&) * constructor. */ template basic_stream_socket(basic_stream_socket&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : basic_socket(std::move(other)) { } /// Move-assign a basic_stream_socket from a socket of another protocol type. /** * This assignment operator moves a stream socket from one object to another. * * @param other The other basic_stream_socket object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_socket(const executor_type&) * constructor. 
*/ template typename enable_if< is_convertible::value && is_convertible::value, basic_stream_socket& >::type operator=(basic_stream_socket&& other) { basic_socket::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the socket. /** * This function destroys the socket, cancelling any outstanding asynchronous * operations associated with the socket as if by calling @c cancel. */ ~basic_stream_socket() { } /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. 
* * @returns The number of bytes sent. * * @throws asio::system_error Thrown on failure. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.send(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "send"); return s; } /// Send some data on the socket. /** * This function is used to send data on the stream socket. The function * call will block until one or more bytes of the data has been sent * successfully, or an until error occurs. * * @param buffers One or more data buffers to be sent on the socket. * * @param flags Flags specifying how the send call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes sent. Returns 0 if an error occurred. * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref write function if you need to ensure that all data * is written before the blocking operation completes. */ template std::size_t send(const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().send( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the stream socket. * The function call always returns immediately. 
* * @param buffers One or more data buffers to be sent on the socket. Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous send. /** * This function is used to asynchronously send data on the stream socket. * The function call always returns immediately. * * @param buffers One or more data buffers to be sent on the socket. 
Although * the buffers object may be copied as necessary, ownership of the underlying * memory blocks is retained by the caller, which must guarantee that they * remain valid until the handler is called. * * @param flags Flags specifying how the send call is to be made. * * @param handler The handler to be called when the send operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes sent. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The send operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To send a single data buffer use the @ref buffer function as follows: * @code * socket.async_send(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on sending multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_send(const ConstBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, flags); } /// Receive some data on the socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. 
* * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on the socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @returns The number of bytes received. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. 
* * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.receive(asio::buffer(data, size), 0); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); asio::detail::throw_error(ec, "receive"); return s; } /// Receive some data on a connected socket. /** * This function is used to receive data on the stream socket. The function * call will block until one or more bytes of data has been received * successfully, or until an error occurs. * * @param buffers One or more buffers into which the data will be received. * * @param flags Flags specifying how the receive call is to be made. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes received. Returns 0 if an error occurred. * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, flags, ec); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the stream * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref async_read function if you need to ensure * that the requested amount of data is received before the asynchronous * operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, socket_base::message_flags(0)); } /// Start an asynchronous receive. /** * This function is used to asynchronously receive data from the stream * socket. The function call always returns immediately. * * @param buffers One or more buffers into which the data will be received. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param flags Flags specifying how the receive call is to be made. * * @param handler The handler to be called when the receive operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes received. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The receive operation may not receive all of the requested number of * bytes. Consider using the @ref async_read function if you need to ensure * that the requested amount of data is received before the asynchronous * operation completes. * * @par Example * To receive into a single data buffer use the @ref buffer function as * follows: * @code * socket.async_receive(asio::buffer(data, size), 0, handler); * @endcode * See the @ref buffer documentation for information on receiving into * multiple buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_receive(const MutableBufferSequence& buffers, socket_base::message_flags flags, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, flags); } /// Write some data to the socket. /** * This function is used to write data to the stream socket. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. 
* * @param buffers One or more data buffers to be written to the socket. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * socket.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().send( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the socket. /** * This function is used to write data to the stream socket. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().send( this->impl_.get_implementation(), buffers, 0, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the stream socket. 
* The function call always returns immediately. * * @param buffers One or more data buffers to be written to the socket. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * socket.async_write_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_send(), handler, this, buffers, socket_base::message_flags(0)); } /// Read some data from the socket. /** * This function is used to read data from the stream socket. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. 
* * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * socket.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, 0, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the socket. /** * This function is used to read data from the stream socket. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().receive( this->impl_.get_implementation(), buffers, 0, ec); } /// Start an asynchronous read. 
/** * This function is used to asynchronously read data from the stream socket. * The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * socket.async_read_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_receive(), handler, this, buffers, socket_base::message_flags(0)); } private: struct initiate_async_send { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_stream_socket* self, const ConstBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_send( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_receive { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_stream_socket* self, const MutableBufferSequence& buffers, socket_base::message_flags flags) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_receive( self->impl_.get_implementation(), buffers, flags, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_STREAM_SOCKET_HPP galera-4-26.4.25/asio/asio/packaged_task.hpp000644 000164 177776 00000006163 15107057155 021702 0ustar00jenkinsnogroup000000 000000 // // packaged_task.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_PACKAGED_TASK_HPP #define ASIO_PACKAGED_TASK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/future.hpp" #if defined(ASIO_HAS_STD_FUTURE_CLASS) \ || defined(GENERATING_DOCUMENTATION) #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_VARIADIC_TEMPLATES) \ || defined(GENERATING_DOCUMENTATION) /// Partial specialisation of @c async_result for @c std::packaged_task. template class async_result, Signature> { public: /// The packaged task is the concrete completion handler type. typedef std::packaged_task completion_handler_type; /// The return type of the initiating function is the future obtained from /// the packaged task. typedef std::future return_type; /// The constructor extracts the future from the packaged task. explicit async_result(completion_handler_type& h) : future_(h.get_future()) { } /// Returns the packaged task's future. 
return_type get() { return std::move(future_); } private: return_type future_; }; #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) template struct async_result, Signature> { typedef std::packaged_task completion_handler_type; typedef std::future return_type; explicit async_result(completion_handler_type& h) : future_(h.get_future()) { } return_type get() { return std::move(future_); } private: return_type future_; }; #define ASIO_PRIVATE_ASYNC_RESULT_DEF(n) \ template \ class async_result< \ std::packaged_task, Signature> \ { \ public: \ typedef std::packaged_task< \ Result(ASIO_VARIADIC_TARGS(n))> \ completion_handler_type; \ \ typedef std::future return_type; \ \ explicit async_result(completion_handler_type& h) \ : future_(h.get_future()) \ { \ } \ \ return_type get() \ { \ return std::move(future_); \ } \ \ private: \ return_type future_; \ }; \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_RESULT_DEF) #undef ASIO_PRIVATE_ASYNC_RESULT_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_FUTURE_CLASS) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_PACKAGED_TASK_HPP galera-4-26.4.25/asio/asio/io_service_strand.hpp000644 000164 177776 00000001047 15107057155 022617 0ustar00jenkinsnogroup000000 000000 // // io_service_strand.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IO_SERVICE_STRAND_HPP #define ASIO_IO_SERVICE_STRAND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/io_context_strand.hpp" #endif // ASIO_IO_SERVICE_STRAND_HPP galera-4-26.4.25/asio/asio/time_traits.hpp000644 000164 177776 00000004222 15107057155 021437 0ustar00jenkinsnogroup000000 000000 // // time_traits.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TIME_TRAITS_HPP #define ASIO_TIME_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/socket_types.hpp" // Must come before posix_time. #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/detail/push_options.hpp" namespace asio { /// Time traits suitable for use with the deadline timer. template struct time_traits; /// Time traits specialised for posix_time. template <> struct time_traits { /// The time type. typedef boost::posix_time::ptime time_type; /// The duration type. typedef boost::posix_time::time_duration duration_type; /// Get the current time. static time_type now() { #if defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) return boost::posix_time::microsec_clock::universal_time(); #else // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) return boost::posix_time::second_clock::universal_time(); #endif // defined(BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK) } /// Add a duration to a time. static time_type add(const time_type& t, const duration_type& d) { return t + d; } /// Subtract one time from another. 
static duration_type subtract(const time_type& t1, const time_type& t2) { return t1 - t2; } /// Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { return t1 < t2; } /// Convert to POSIX duration type. static boost::posix_time::time_duration to_posix_duration( const duration_type& d) { return d; } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_TIME_TRAITS_HPP galera-4-26.4.25/asio/asio/associated_executor.hpp000644 000164 177776 00000010535 15107057155 023154 0ustar00jenkinsnogroup000000 000000 // // associated_executor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ASSOCIATED_EXECUTOR_HPP #define ASIO_ASSOCIATED_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/is_executor.hpp" #include "asio/system_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct associated_executor_check { typedef void type; }; template struct associated_executor_impl { typedef E type; static type get(const T&, const E& e) ASIO_NOEXCEPT { return e; } }; template struct associated_executor_impl::type> { typedef typename T::executor_type type; static type get(const T& t, const E&) ASIO_NOEXCEPT { return t.get_executor(); } }; } // namespace detail /// Traits type used to obtain the executor associated with an object. /** * A program may specialise this traits type if the @c T template parameter in * the specialisation is a user-defined type. 
The template parameter @c * Executor shall be a type meeting the Executor requirements. * * Specialisations shall meet the following requirements, where @c t is a const * reference to an object of type @c T, and @c e is an object of type @c * Executor. * * @li Provide a nested typedef @c type that identifies a type meeting the * Executor requirements. * * @li Provide a noexcept static member function named @c get, callable as @c * get(t) and with return type @c type. * * @li Provide a noexcept static member function named @c get, callable as @c * get(t,e) and with return type @c type. */ template struct associated_executor { /// If @c T has a nested type @c executor_type, T::executor_type. /// Otherwise @c Executor. #if defined(GENERATING_DOCUMENTATION) typedef see_below type; #else // defined(GENERATING_DOCUMENTATION) typedef typename detail::associated_executor_impl::type type; #endif // defined(GENERATING_DOCUMENTATION) /// If @c T has a nested type @c executor_type, returns /// t.get_executor(). Otherwise returns @c ex. static type get(const T& t, const Executor& ex = Executor()) ASIO_NOEXCEPT { return detail::associated_executor_impl::get(t, ex); } }; /// Helper function to obtain an object's associated executor. /** * @returns associated_executor::get(t) */ template inline typename associated_executor::type get_associated_executor(const T& t) ASIO_NOEXCEPT { return associated_executor::get(t); } /// Helper function to obtain an object's associated executor. /** * @returns associated_executor::get(t, ex) */ template inline typename associated_executor::type get_associated_executor(const T& t, const Executor& ex, typename enable_if::value>::type* = 0) ASIO_NOEXCEPT { return associated_executor::get(t, ex); } /// Helper function to obtain an object's associated executor. 
/** * @returns associated_executor::get(t, ctx.get_executor()) */ template inline typename associated_executor::type get_associated_executor(const T& t, ExecutionContext& ctx, typename enable_if::value>::type* = 0) ASIO_NOEXCEPT { return associated_executor::get(t, ctx.get_executor()); } #if defined(ASIO_HAS_ALIAS_TEMPLATES) template using associated_executor_t = typename associated_executor::type; #endif // defined(ASIO_HAS_ALIAS_TEMPLATES) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_ASSOCIATED_EXECUTOR_HPP galera-4-26.4.25/asio/asio/serial_port.hpp000644 000164 177776 00000001641 15107057155 021440 0ustar00jenkinsnogroup000000 000000 // // serial_port.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SERIAL_PORT_HPP #define ASIO_SERIAL_PORT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #include "asio/basic_serial_port.hpp" namespace asio { /// Typedef for the typical usage of a serial port. typedef basic_serial_port<> serial_port; } // namespace asio #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_SERIAL_PORT_HPP galera-4-26.4.25/asio/asio/is_executor.hpp000644 000164 177776 00000002302 15107057155 021441 0ustar00jenkinsnogroup000000 000000 // // is_executor.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IS_EXECUTOR_HPP #define ASIO_IS_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// The is_executor trait detects whether a type T meets the Executor type /// requirements. /** * Class template @c is_executor is a UnaryTypeTrait that is derived from @c * true_type if the type @c T meets the syntactic requirements for Executor, * otherwise @c false_type. */ template struct is_executor #if defined(GENERATING_DOCUMENTATION) : integral_constant #else // defined(GENERATING_DOCUMENTATION) : asio::detail::is_executor #endif // defined(GENERATING_DOCUMENTATION) { }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IS_EXECUTOR_HPP galera-4-26.4.25/asio/asio/error_code.hpp000644 000164 177776 00000010720 15107057155 021236 0ustar00jenkinsnogroup000000 000000 // // error_code.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ERROR_CODE_HPP #define ASIO_ERROR_CODE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_SYSTEM_ERROR) # include #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) # include # include "asio/detail/noncopyable.hpp" # if !defined(ASIO_NO_IOSTREAM) # include # endif // !defined(ASIO_NO_IOSTREAM) #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::error_category error_category; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Base class for all error categories. class error_category : private noncopyable { public: /// Destructor. virtual ~error_category() { } /// Returns a string naming the error gategory. virtual const char* name() const = 0; /// Returns a string describing the error denoted by @c value. virtual std::string message(int value) const = 0; /// Equality operator to compare two error categories. bool operator==(const error_category& rhs) const { return this == &rhs; } /// Inequality operator to compare two error categories. bool operator!=(const error_category& rhs) const { return !(*this == rhs); } }; #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Returns the error category used for the system errors produced by asio. extern ASIO_DECL const error_category& system_category(); #if defined(ASIO_HAS_STD_SYSTEM_ERROR) typedef std::error_code error_code; #else // defined(ASIO_HAS_STD_SYSTEM_ERROR) /// Class to represent an error code value. class error_code { public: /// Default constructor. error_code() : value_(0), category_(&system_category()) { } /// Construct with specific error code and category. error_code(int v, const error_category& c) : value_(v), category_(&c) { } /// Construct from an error code enum. 
template error_code(ErrorEnum e) { *this = make_error_code(e); } /// Clear the error value to the default. void clear() { value_ = 0; category_ = &system_category(); } /// Assign a new error value. void assign(int v, const error_category& c) { value_ = v; category_ = &c; } /// Get the error value. int value() const { return value_; } /// Get the error category. const error_category& category() const { return *category_; } /// Get the message associated with the error. std::string message() const { return category_->message(value_); } struct unspecified_bool_type_t { }; typedef void (*unspecified_bool_type)(unspecified_bool_type_t); static void unspecified_bool_true(unspecified_bool_type_t) {} /// Operator returns non-null if there is a non-success error code. operator unspecified_bool_type() const { if (value_ == 0) return 0; else return &error_code::unspecified_bool_true; } /// Operator to test if the error represents success. bool operator!() const { return value_ == 0; } /// Equality operator to compare two error objects. friend bool operator==(const error_code& e1, const error_code& e2) { return e1.value_ == e2.value_ && e1.category_ == e2.category_; } /// Inequality operator to compare two error objects. friend bool operator!=(const error_code& e1, const error_code& e2) { return e1.value_ != e2.value_ || e1.category_ != e2.category_; } private: // The value associated with the error code. int value_; // The category associated with the error code. const error_category* category_; }; # if !defined(ASIO_NO_IOSTREAM) /// Output an error code. 
template std::basic_ostream& operator<<( std::basic_ostream& os, const error_code& ec) { os << ec.category().name() << ':' << ec.value(); return os; } # endif // !defined(ASIO_NO_IOSTREAM) #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/error_code.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_ERROR_CODE_HPP galera-4-26.4.25/asio/asio/read_until.hpp000644 000164 177776 00000337401 15107057155 021251 0ustar00jenkinsnogroup000000 000000 // // read_until.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_UNTIL_HPP #define ASIO_READ_UNTIL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/async_result.hpp" #include "asio/buffer.hpp" #include "asio/detail/regex_fwd.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #if !defined(ASIO_NO_EXTENSIONS) # include "asio/basic_streambuf_fwd.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { char (&has_result_type_helper(...))[2]; template char has_result_type_helper(T*, typename T::result_type* = 0); template struct has_result_type { enum { value = (sizeof((has_result_type_helper)((T*)(0))) == 1) }; }; } // namespace detail /// Type trait used to determine whether a type can be used as a match condition /// function with read_until and async_read_until. template struct is_match_condition { #if defined(GENERATING_DOCUMENTATION) /// The value member is true if the type may be used as a match condition. 
static const bool value; #else enum { value = asio::is_function< typename asio::remove_pointer::type>::value || detail::has_result_type::value }; #endif }; /** * @defgroup read_until asio::read_until * * @brief The @c read_until function is a composed operation that reads data * into a dynamic buffer sequence, or into a streambuf, until it contains a * delimiter, matches a regular expression, or a function object indicates a * match. */ /*@{*/ #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter character. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. 
* * @par Example * To read data into a @c std::string until a newline is encountered: * @code std::string data; * std::string n = asio::read_until(s, * asio::dynamic_buffer(data), '\n'); * std::string line = data.substr(0, n); * data.erase(0, n); @endcode * After the @c read_until operation completes successfully, the string @c data * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c b as * follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter character. * * @param ec Set to indicate what error occurred, if any. 
* * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter string. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. 
* * @par Example * To read data into a @c std::string until a CR-LF sequence is encountered: * @code std::string data; * std::string n = asio::read_until(s, * asio::dynamic_buffer(data), "\r\n"); * std::string line = data.substr(0, n); * data.erase(0, n); @endcode * After the @c read_until operation completes successfully, the string @c data * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c b as * follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter string. 
* * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Read data into a dynamic buffer sequence until some part of the data it /// contains matches a regular expression. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains some data * that matches a regular expression. The call will block until one of the * following conditions is true: * * @li A substring of the dynamic buffer sequence's get area matches the * regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains data that matches the regular expression, the function returns * immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param expr The regular expression. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the substring that matches the regular expression. * * @throws asio::system_error Thrown on failure. 
* * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the regular * expression. An application will typically leave that data in the dynamic * buffer sequence for a subsequent read_until operation to examine. * * @par Example * To read data into a @c std::string until a CR-LF sequence is encountered: * @code std::string data; * std::string n = asio::read_until(s, * asio::dynamic_buffer(data), boost::regex("\r\n")); * std::string line = data.substr(0, n); * data.erase(0, n); @endcode * After the @c read_until operation completes successfully, the string @c data * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c b as * follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Read data into a dynamic buffer sequence until some part of the data it /// contains matches a regular expression. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains some data * that matches a regular expression. The call will block until one of the * following conditions is true: * * @li A substring of the dynamic buffer sequence's get area matches the * regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. 
If the dynamic buffer sequence's get area already * contains data that matches the regular expression, the function returns * immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param expr The regular expression. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the substring that matches the regular expression. Returns 0 * if an error occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the regular * expression. An application will typically leave that data in the dynamic * buffer sequence for a subsequent read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Read data into a dynamic buffer sequence until a function object indicates a /// match. /** * This function is used to read data into the specified dynamic buffer * sequence until a user-defined match condition function object, when applied * to the data contained in the dynamic buffer sequence, indicates a successful * match. The call will block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. 
* * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @returns The number of bytes in the dynamic_buffer's get area that * have been fully consumed by the match function. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the function object. * An application will typically leave that data in the dynamic buffer sequence * for a subsequent read_until operation to examine. * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
* * @par Examples * To read data into a dynamic buffer sequence until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::const_buffers_1> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... * std::string data; * asio::read_until(s, data, match_whitespace); * @endcode * * To read data into a @c std::string until a matching character is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * std::string data; * asio::read_until(s, data, match_char('a')); * @endcode */ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Read data into a dynamic buffer sequence until a function object indicates a /// match. /** * This function is used to read data into the specified dynamic buffer * sequence until a user-defined match condition function object, when applied * to the data contained in the dynamic buffer sequence, indicates a successful * match. The call will block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. 
If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the dynamic buffer sequence's get area that * have been fully consumed by the match function. Returns 0 if an error * occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the function object. * An application will typically leave that data in the dynamic buffer sequence * for a subsequent read_until operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
*/ template std::size_t read_until(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, asio::error_code& ec, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_IOSTREAM) /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter character. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. * * @par Example * To read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * asio::read_until(s, b, '\n'); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... 
} @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim); /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter character. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, char delim, asio::error_code& ec); /// Read data into a streambuf until it contains a specified delimiter. 
/** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter string. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. * * @par Example * To read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * asio::read_until(s, b, "\r\n"); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. 
*/ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim); /// Read data into a streambuf until it contains a specified delimiter. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains the specified delimiter. The call will block * until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains the * delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param delim The delimiter string. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond the delimiter. An application will typically leave * that data in the streambuf for a subsequent read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec); #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Read data into a streambuf until some part of the data it contains matches /// a regular expression. /** * This function is used to read data into the specified streambuf until the * streambuf's get area contains some data that matches a regular expression. * The call will block until one of the following conditions is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains data that * matches the regular expression, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param expr The regular expression. * * @returns The number of bytes in the streambuf's get area up to and including * the substring that matches the regular expression. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * read_until operation to examine. * * @par Example * To read data into a streambuf until a CR-LF sequence is encountered: * @code asio::streambuf b; * asio::read_until(s, b, boost::regex("\r\n")); * std::istream is(&b); * std::string line; * std::getline(is, line); @endcode * After the @c read_until operation completes successfully, the buffer @c b * contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr); /// Read data into a streambuf until some part of the data it contains matches /// a regular expression. 
/** * This function is used to read data into the specified streambuf until the * streambuf's get area contains some data that matches a regular expression. * The call will block until one of the following conditions is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the streambuf's get area already contains data that * matches the regular expression, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param expr The regular expression. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area up to and including * the substring that matches the regular expression. Returns 0 if an error * occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, asio::error_code& ec); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Read data into a streambuf until a function object indicates a match. /** * This function is used to read data into the specified streambuf until a * user-defined match condition function object, when applied to the data * contained in the streambuf, indicates a successful match. The call will * block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @returns The number of bytes in the streambuf's get area that have been fully * consumed by the match function. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the function object. An application * will typically leave that data in the streambuf for a subsequent read_until * operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
* * @par Examples * To read data into a streambuf until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::streambuf::const_buffers_type> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... * asio::streambuf b; * asio::read_until(s, b, match_whitespace); * @endcode * * To read data into a streambuf until a matching character is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * asio::streambuf b; * asio::read_until(s, b, match_char('a')); * @endcode */ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, typename enable_if::value>::type* = 0); /// Read data into a streambuf until a function object indicates a match. /** * This function is used to read data into the specified streambuf until a * user-defined match condition function object, when applied to the data * contained in the streambuf, indicates a successful match. The call will * block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. 
The type must support * the SyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the streambuf's get area that have been fully * consumed by the match function. Returns 0 if an error occurred. * * @note After a successful read_until operation, the streambuf may contain * additional data beyond that which matched the function object. An application * will typically leave that data in the streambuf for a subsequent read_until * operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
*/ template std::size_t read_until(SyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, asio::error_code& ec, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter character. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. 
* * @par Example * To read data into a @c std::string until a newline is encountered: * @code std::string data; * std::string n = asio::read_until(s, * asio::dynamic_buffer(data), '\n'); * std::string line = data.substr(0, n); * data.erase(0, n); @endcode * After the @c read_until operation completes successfully, the string @c data * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c b as * follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, char delim, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter character. * * @param ec Set to indicate what error occurred, if any. 
* * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. Returns 0 if an error occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, char delim, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Read data into a dynamic buffer sequence until it contains a specified /// delimiter. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains the specified * delimiter. The call will block until one of the following conditions is * true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the dynamic buffer sequence's get area already * contains the delimiter, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param delim The delimiter string. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the delimiter. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond the delimiter. An application will * typically leave that data in the dynamic buffer sequence for a subsequent * read_until operation to examine. 
 *
 * @par Example
 * To read data into a @c std::string until a CR-LF sequence is encountered:
 * @code std::string data;
 * std::size_t n = asio::read_until(s,
 *     asio::dynamic_buffer(data), "\r\n");
 * std::string line = data.substr(0, n);
 * data.erase(0, n); @endcode
 * After the @c read_until operation completes successfully, the string @c data
 * contains the delimiter:
 * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode
 * The call to @c substr then extracts the data up to and including the
 * delimiter, so that the string @c line contains:
 * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode
 * After the call to @c erase, the remaining data is left in the buffer @c data
 * as follows:
 * @code { 'd', 'e', ... } @endcode
 * This data may be the start of a new line, to be extracted by a subsequent
 * @c read_until operation.
 */
template <typename SyncReadStream, typename DynamicBuffer_v2>
std::size_t read_until(SyncReadStream& s,
    DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim,
    typename enable_if<
      is_dynamic_buffer_v2<DynamicBuffer_v2>::value
    >::type* = 0);

/// Read data into a dynamic buffer sequence until it contains a specified
/// delimiter.
/**
 * This function is used to read data into the specified dynamic buffer
 * sequence until the dynamic buffer sequence's get area contains the specified
 * delimiter. The call will block until one of the following conditions is
 * true:
 *
 * @li The get area of the dynamic buffer sequence contains the specified
 * delimiter.
 *
 * @li An error occurred.
 *
 * This operation is implemented in terms of zero or more calls to the stream's
 * read_some function. If the dynamic buffer sequence's get area already
 * contains the delimiter, the function returns immediately.
 *
 * @param s The stream from which the data is to be read. The type must support
 * the SyncReadStream concept.
 *
 * @param buffers The dynamic buffer sequence into which the data will be read.
 *
 * @param delim The delimiter string.
 *
 * @param ec Set to indicate what error occurred, if any.
 *
 * @returns The number of bytes in the dynamic buffer sequence's get area up to
 * and including the delimiter. Returns 0 if an error occurred.
 *
 * @note After a successful read_until operation, the dynamic buffer sequence
 * may contain additional data beyond the delimiter. An application will
 * typically leave that data in the dynamic buffer sequence for a subsequent
 * read_until operation to examine.
 */
template <typename SyncReadStream, typename DynamicBuffer_v2>
std::size_t read_until(SyncReadStream& s,
    DynamicBuffer_v2 buffers,
    ASIO_STRING_VIEW_PARAM delim, asio::error_code& ec,
    typename enable_if<
      is_dynamic_buffer_v2<DynamicBuffer_v2>::value
    >::type* = 0);

#if !defined(ASIO_NO_EXTENSIONS)
#if defined(ASIO_HAS_BOOST_REGEX) \
  || defined(GENERATING_DOCUMENTATION)

/// Read data into a dynamic buffer sequence until some part of the data it
/// contains matches a regular expression.
/**
 * This function is used to read data into the specified dynamic buffer
 * sequence until the dynamic buffer sequence's get area contains some data
 * that matches a regular expression. The call will block until one of the
 * following conditions is true:
 *
 * @li A substring of the dynamic buffer sequence's get area matches the
 * regular expression.
 *
 * @li An error occurred.
 *
 * This operation is implemented in terms of zero or more calls to the stream's
 * read_some function. If the dynamic buffer sequence's get area already
 * contains data that matches the regular expression, the function returns
 * immediately.
 *
 * @param s The stream from which the data is to be read. The type must support
 * the SyncReadStream concept.
 *
 * @param buffers A dynamic buffer sequence into which the data will be read.
 *
 * @param expr The regular expression.
 *
 * @returns The number of bytes in the dynamic buffer sequence's get area up to
 * and including the substring that matches the regular expression.
 *
 * @throws asio::system_error Thrown on failure.
* * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the regular * expression. An application will typically leave that data in the dynamic * buffer sequence for a subsequent read_until operation to examine. * * @par Example * To read data into a @c std::string until a CR-LF sequence is encountered: * @code std::string data; * std::string n = asio::read_until(s, * asio::dynamic_buffer(data), boost::regex("\r\n")); * std::string line = data.substr(0, n); * data.erase(0, n); @endcode * After the @c read_until operation completes successfully, the string @c data * contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c b as * follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c read_until operation. */ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Read data into a dynamic buffer sequence until some part of the data it /// contains matches a regular expression. /** * This function is used to read data into the specified dynamic buffer * sequence until the dynamic buffer sequence's get area contains some data * that matches a regular expression. The call will block until one of the * following conditions is true: * * @li A substring of the dynamic buffer sequence's get area matches the * regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. 
If the dynamic buffer sequence's get area already * contains data that matches the regular expression, the function returns * immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param expr The regular expression. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the dynamic buffer sequence's get area up to * and including the substring that matches the regular expression. Returns 0 * if an error occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the regular * expression. An application will typically leave that data in the dynamic * buffer sequence for a subsequent read_until operation to examine. */ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Read data into a dynamic buffer sequence until a function object indicates a /// match. /** * This function is used to read data into the specified dynamic buffer * sequence until a user-defined match condition function object, when applied * to the data contained in the dynamic buffer sequence, indicates a successful * match. The call will block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. 
The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @returns The number of bytes in the dynamic_buffer's get area that * have been fully consumed by the match function. * * @throws asio::system_error Thrown on failure. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the function object. * An application will typically leave that data in the dynamic buffer sequence * for a subsequent read_until operation to examine. * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. * * @par Examples * To read data into a dynamic buffer sequence until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::const_buffers_1> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... 
* std::string data; * asio::read_until(s, data, match_whitespace); * @endcode * * To read data into a @c std::string until a matching character is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * std::string data; * asio::read_until(s, data, match_char('a')); * @endcode */ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type* = 0); /// Read data into a dynamic buffer sequence until a function object indicates a /// match. /** * This function is used to read data into the specified dynamic buffer * sequence until a user-defined match condition function object, when applied * to the data contained in the dynamic buffer sequence, indicates a successful * match. The call will block until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. If the match condition function object already indicates * a match, the function returns immediately. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers A dynamic buffer sequence into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. 
The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes in the dynamic buffer sequence's get area that * have been fully consumed by the match function. Returns 0 if an error * occurred. * * @note After a successful read_until operation, the dynamic buffer sequence * may contain additional data beyond that which matched the function object. * An application will typically leave that data in the dynamic buffer sequence * for a subsequent read_until operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
*/ template std::size_t read_until(SyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, asio::error_code& ec, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type* = 0); #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ /** * @defgroup async_read_until asio::async_read_until * * @brief The @c async_read_until function is a composed asynchronous operation * that reads data into a dynamic buffer sequence, or into a streambuf, until * it contains a delimiter, matches a regular expression, or a function object * indicates a match. */ /*@{*/ #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until it contains a specified delimiter. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains the * specified delimiter. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains the delimiter, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param delim The delimiter character. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer sequence's * // get area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond the delimiter. An application * will typically leave that data in the dynamic buffer sequence for a * subsequent async_read_until operation to examine. * * @par Example * To asynchronously read data into a @c std::string until a newline is * encountered: * @code std::string data; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, '\n', handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c data contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... 
} @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, char delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until it contains a specified delimiter. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains the * specified delimiter. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains the delimiter, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. 
* * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param delim The delimiter string. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer sequence's * // get area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond the delimiter. An application * will typically leave that data in the dynamic buffer sequence for a * subsequent async_read_until operation to examine. * * @par Example * To asynchronously read data into a @c std::string until a CR-LF sequence is * encountered: * @code std::string data; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, "\r\n", handler); @endcode * After the @c async_read_until operation completes successfully, the string * @c data contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... 
} @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the string @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_STRING_VIEW_PARAM delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until some part of its data matches a regular expression. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains some * data that matches a regular expression. The function call always returns * immediately. The asynchronous operation will continue until one of the * following conditions is true: * * @li A substring of the dynamic buffer sequence's get area matches the regular * expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains data that matches * the regular expression, this asynchronous operation completes immediately. 
* The program must ensure that the stream performs no other read operations * (such as async_read, async_read_until, the stream's async_read_some * function, or any other composed operations that perform reads) until this * operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param expr The regular expression. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer * // sequence's get area up to and including the * // substring that matches the regular expression. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond that which matched the regular * expression. An application will typically leave that data in the dynamic * buffer sequence for a subsequent async_read_until operation to examine. * * @par Example * To asynchronously read data into a @c std::string until a CR-LF sequence is * encountered: * @code std::string data; * ... 
* void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, * boost::regex("\r\n"), handler); @endcode * After the @c async_read_until operation completes successfully, the string * @c data contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the match, * so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the string @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until a function object indicates a match. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until a user-defined match condition function object, when * applied to the data contained in the dynamic buffer sequence, indicates a * successful match. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the match condition function object already indicates a match, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. 
 *     const asio::error_code& error,
 *
 *     // The number of bytes in the dynamic buffer sequence's
 *     // get area that have been fully consumed by the match
 *     // function. 0 if an error occurred.
 *     std::size_t bytes_transferred
 *   ); @endcode
 * Regardless of whether the asynchronous operation completes immediately or
 * not, the handler will not be invoked from within this function. On
 * immediate completion, invocation of the handler will be performed in a
 * manner equivalent to using asio::post().
 *
 * @note After a successful async_read_until operation, the dynamic buffer
 * sequence may contain additional data beyond that which matched the function
 * object. An application will typically leave that data in the dynamic buffer
 * sequence for a subsequent async_read_until operation to examine.
 *
 * @note The default implementation of the @c is_match_condition type trait
 * evaluates to true for function pointers and function objects with a
 * @c result_type typedef. It must be specialised for other user-defined
 * function objects.
 *
 * @par Examples
 * To asynchronously read data into a @c std::string until whitespace is
 * encountered:
 * @code typedef asio::buffers_iterator<
 *     asio::const_buffers_1> iterator;
 *
 * std::pair<iterator, bool>
 *   match_whitespace(iterator begin, iterator end)
 * {
 *   iterator i = begin;
 *   while (i != end)
 *     if (std::isspace(*i++))
 *       return std::make_pair(i, true);
 *   return std::make_pair(i, false);
 * }
 * ...
 * void handler(const asio::error_code& e, std::size_t size);
 * ...
* std::string data; * asio::async_read_until(s, data, match_whitespace, handler); * @endcode * * To asynchronously read data into a @c std::string until a matching character * is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * void handler(const asio::error_code& e, std::size_t size); * ... * std::string data; * asio::async_read_until(s, data, match_char('a'), handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_match_condition::value && is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to read data into a streambuf until it /// contains a specified delimiter. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains the specified delimiter. * The function call always returns immediately. The asynchronous operation * will continue until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains the delimiter, this asynchronous * operation completes immediately. 
The program must ensure that the stream * performs no other read operations (such as async_read, async_read_until, the * stream's async_read_some function, or any other composed operations that * perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param delim The delimiter character. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond the delimiter. An application will typically * leave that data in the streambuf for a subsequent async_read_until operation * to examine. * * @par Example * To asynchronously read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... 
* asio::async_read_until(s, b, '\n', handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, char delim, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read data into a streambuf until it /// contains a specified delimiter. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains the specified delimiter. * The function call always returns immediately. The asynchronous operation * will continue until one of the following conditions is true: * * @li The get area of the streambuf contains the specified delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains the delimiter, this asynchronous * operation completes immediately. The program must ensure that the stream * performs no other read operations (such as async_read, async_read_until, the * stream's async_read_some function, or any other composed operations that * perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. 
Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param delim The delimiter string. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond the delimiter. An application will typically * leave that data in the streambuf for a subsequent async_read_until operation * to examine. * * @par Example * To asynchronously read data into a streambuf until a newline is encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... * asio::async_read_until(s, b, "\r\n", handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... 
} @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, ASIO_STRING_VIEW_PARAM delim, ASIO_MOVE_ARG(ReadHandler) handler); #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a streambuf until some /// part of its data matches a regular expression. /** * This function is used to asynchronously read data into the specified * streambuf until the streambuf's get area contains some data that matches a * regular expression. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li A substring of the streambuf's get area matches the regular expression. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the streambuf's get area already contains data that matches the regular * expression, this asynchronous operation completes immediately. The program * must ensure that the stream performs no other read operations (such as * async_read, async_read_until, the stream's async_read_some function, or any * other composed operations that perform reads) until this operation * completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. Ownership of * the streambuf is retained by the caller, which must guarantee that it remains * valid until the handler is called. * * @param expr The regular expression. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. 
The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area up to and including the substring * // that matches the regular. expression. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond that which matched the regular expression. An * application will typically leave that data in the streambuf for a subsequent * async_read_until operation to examine. * * @par Example * To asynchronously read data into a streambuf until a CR-LF sequence is * encountered: * @code asio::streambuf b; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::istream is(&b); * std::string line; * std::getline(is, line); * ... * } * } * ... * asio::async_read_until(s, b, boost::regex("\r\n"), handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c b contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c std::getline then extracts the data up to and including the * newline (which is discarded), so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r' } @endcode * The remaining data is left in the buffer @c b as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. 
*/ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a streambuf until a /// function object indicates a match. /** * This function is used to asynchronously read data into the specified * streambuf until a user-defined match condition function object, when applied * to the data contained in the streambuf, indicates a successful match. The * function call always returns immediately. The asynchronous operation will * continue until one of the following conditions is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the match condition function object already indicates a match, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A streambuf object into which the data will be read. * * @param match_condition The function object to be called to determine whether * a match exists. 
The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator::const_buffers_type> * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the streambuf's get * // area that have been fully consumed by the * // match function. O if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the streambuf may * contain additional data beyond that which matched the function object. An * application will typically leave that data in the streambuf for a subsequent * async_read_until operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. 
* * @par Examples * To asynchronously read data into a streambuf until whitespace is encountered: * @code typedef asio::buffers_iterator< * asio::streambuf::const_buffers_type> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... * void handler(const asio::error_code& e, std::size_t size); * ... * asio::streambuf b; * asio::async_read_until(s, b, match_whitespace, handler); * @endcode * * To asynchronously read data into a streambuf until a matching character is * found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * void handler(const asio::error_code& e, std::size_t size); * ... * asio::streambuf b; * asio::async_read_until(s, b, match_char('a'), handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, asio::basic_streambuf& b, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if::value>::type* = 0); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until it contains a specified delimiter. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains the * specified delimiter. The function call always returns immediately. 
The * asynchronous operation will continue until one of the following conditions * is true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains the delimiter, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param delim The delimiter character. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer sequence's * // get area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond the delimiter. An application * will typically leave that data in the dynamic buffer sequence for a * subsequent async_read_until operation to examine. * * @par Example * To asynchronously read data into a @c std::string until a newline is * encountered: * @code std::string data; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, '\n', handler); @endcode * After the @c async_read_until operation completes successfully, the buffer * @c data contains the delimiter: * @code { 'a', 'b', ..., 'c', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\n' } @endcode * After the call to @c erase, the remaining data is left in the buffer @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, char delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until it contains a specified delimiter. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains the * specified delimiter. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions * is true: * * @li The get area of the dynamic buffer sequence contains the specified * delimiter. 
* * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains the delimiter, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param delim The delimiter string. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer sequence's * // get area up to and including the delimiter. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond the delimiter. An application * will typically leave that data in the dynamic buffer sequence for a * subsequent async_read_until operation to examine. 
* * @par Example * To asynchronously read data into a @c std::string until a CR-LF sequence is * encountered: * @code std::string data; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, "\r\n", handler); @endcode * After the @c async_read_until operation completes successfully, the string * @c data contains the delimiter: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the * delimiter, so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the string @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_STRING_VIEW_PARAM delim, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_BOOST_REGEX) \ || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until some part of its data matches a regular expression. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until the dynamic buffer sequence's get area contains some * data that matches a regular expression. The function call always returns * immediately. The asynchronous operation will continue until one of the * following conditions is true: * * @li A substring of the dynamic buffer sequence's get area matches the regular * expression. * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the dynamic buffer sequence's get area already contains data that matches * the regular expression, this asynchronous operation completes immediately. * The program must ensure that the stream performs no other read operations * (such as async_read, async_read_until, the stream's async_read_some * function, or any other composed operations that perform reads) until this * operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param expr The regular expression. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer * // sequence's get area up to and including the * // substring that matches the regular expression. * // 0 if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond that which matched the regular * expression. 
An application will typically leave that data in the dynamic * buffer sequence for a subsequent async_read_until operation to examine. * * @par Example * To asynchronously read data into a @c std::string until a CR-LF sequence is * encountered: * @code std::string data; * ... * void handler(const asio::error_code& e, std::size_t size) * { * if (!e) * { * std::string line = data.substr(0, n); * data.erase(0, n); * ... * } * } * ... * asio::async_read_until(s, data, * boost::regex("\r\n"), handler); @endcode * After the @c async_read_until operation completes successfully, the string * @c data contains the data which matched the regular expression: * @code { 'a', 'b', ..., 'c', '\r', '\n', 'd', 'e', ... } @endcode * The call to @c substr then extracts the data up to and including the match, * so that the string @c line contains: * @code { 'a', 'b', ..., 'c', '\r', '\n' } @endcode * After the call to @c erase, the remaining data is left in the string @c data * as follows: * @code { 'd', 'e', ... } @endcode * This data may be the start of a new line, to be extracted by a subsequent * @c async_read_until operation. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, const boost::regex& expr, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); #endif // defined(ASIO_HAS_BOOST_REGEX) // || defined(GENERATING_DOCUMENTATION) /// Start an asynchronous operation to read data into a dynamic buffer sequence /// until a function object indicates a match. /** * This function is used to asynchronously read data into the specified dynamic * buffer sequence until a user-defined match condition function object, when * applied to the data contained in the dynamic buffer sequence, indicates a * successful match. The function call always returns immediately. 
The * asynchronous operation will continue until one of the following conditions * is true: * * @li The match condition function object returns a std::pair where the second * element evaluates to true. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. If * the match condition function object already indicates a match, this * asynchronous operation completes immediately. The program must ensure that * the stream performs no other read operations (such as async_read, * async_read_until, the stream's async_read_some function, or any other * composed operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param match_condition The function object to be called to determine whether * a match exists. The signature of the function object must be: * @code pair match_condition(iterator begin, iterator end); * @endcode * where @c iterator represents the type: * @code buffers_iterator * @endcode * The iterator parameters @c begin and @c end define the range of bytes to be * scanned to determine whether there is a match. The @c first member of the * return value is an iterator marking one-past-the-end of the bytes that have * been consumed by the match function. This iterator is used to calculate the * @c begin parameter for any subsequent invocation of the match condition. The * @c second member of the return value is true if a match has been found, false * otherwise. * * @param handler The handler to be called when the read operation completes. 
* Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // The number of bytes in the dynamic buffer sequence's * // get area that have been fully consumed by the match * // function. O if an error occurred. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note After a successful async_read_until operation, the dynamic buffer * sequence may contain additional data beyond that which matched the function * object. An application will typically leave that data in the dynamic buffer * sequence for a subsequent async_read_until operation to examine. * * @note The default implementation of the @c is_match_condition type trait * evaluates to true for function pointers and function objects with a * @c result_type typedef. It must be specialised for other user-defined * function objects. * * @par Examples * To asynchronously read data into a @c std::string until whitespace is * encountered: * @code typedef asio::buffers_iterator< * asio::const_buffers_1> iterator; * * std::pair * match_whitespace(iterator begin, iterator end) * { * iterator i = begin; * while (i != end) * if (std::isspace(*i++)) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * ... * void handler(const asio::error_code& e, std::size_t size); * ... 
* std::string data; * asio::async_read_until(s, data, match_whitespace, handler); * @endcode * * To asynchronously read data into a @c std::string until a matching character * is found: * @code class match_char * { * public: * explicit match_char(char c) : c_(c) {} * * template * std::pair operator()( * Iterator begin, Iterator end) const * { * Iterator i = begin; * while (i != end) * if (c_ == *i++) * return std::make_pair(i, true); * return std::make_pair(i, false); * } * * private: * char c_; * }; * * namespace asio { * template <> struct is_match_condition * : public boost::true_type {}; * } // namespace asio * ... * void handler(const asio::error_code& e, std::size_t size); * ... * std::string data; * asio::async_read_until(s, data, match_char('a'), handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_until(AsyncReadStream& s, DynamicBuffer_v2 buffers, MatchCondition match_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_match_condition::value && is_dynamic_buffer_v2::value >::type* = 0); #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read_until.hpp" #endif // ASIO_READ_UNTIL_HPP galera-4-26.4.25/asio/asio/co_spawn.hpp000644 000164 177776 00000004535 15107057155 020733 0ustar00jenkinsnogroup000000 000000 // // co_spawn.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_CO_SPAWN_HPP #define ASIO_CO_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #include "asio/awaitable.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct awaitable_signature; template struct awaitable_signature> { typedef void type(std::exception_ptr, T); }; template struct awaitable_signature> { typedef void type(std::exception_ptr); }; } // namespace detail /// Spawn a new thread of execution. /** * The entry point function object @c f must have the signature: * * @code awaitable f(); @endcode * * where @c E is convertible from @c Executor. */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, typename detail::awaitable_signature::type>::type) co_spawn(const Executor& ex, F&& f, CompletionToken&& token, typename enable_if< is_executor::value >::type* = 0); /// Spawn a new thread of execution. /** * The entry point function object @c f must have the signature: * * @code awaitable f(); @endcode * * where @c E is convertible from @c ExecutionContext::executor_type. 
*/ template ASIO_INITFN_RESULT_TYPE(CompletionToken, typename detail::awaitable_signature::type>::type) co_spawn(ExecutionContext& ctx, F&& f, CompletionToken&& token, typename enable_if< is_convertible::value >::type* = 0); } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/co_spawn.hpp" #endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_CO_SPAWN_HPP galera-4-26.4.25/asio/asio/ts/000755 000164 177776 00000000000 15107057160 017024 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ts/net.hpp000644 000164 177776 00000001243 15107057155 020327 0ustar00jenkinsnogroup000000 000000 // // ts/net.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_NET_HPP #define ASIO_TS_NET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/ts/netfwd.hpp" #include "asio/ts/executor.hpp" #include "asio/ts/io_context.hpp" #include "asio/ts/timer.hpp" #include "asio/ts/buffer.hpp" #include "asio/ts/socket.hpp" #include "asio/ts/internet.hpp" #endif // ASIO_TS_NET_HPP galera-4-26.4.25/asio/asio/ts/netfwd.hpp000644 000164 177776 00000011465 15107057155 021037 0ustar00jenkinsnogroup000000 000000 // // ts/netfwd.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_NETFWD_HPP #define ASIO_TS_NETFWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CHRONO) # include "asio/detail/chrono.hpp" #endif // defined(ASIO_HAS_CHRONO) #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/detail/date_time_fwd.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #if !defined(GENERATING_DOCUMENTATION) #include "asio/detail/push_options.hpp" namespace asio { class execution_context; template class executor_binder; template class executor_work_guard; class system_executor; class executor; template class strand; class io_context; template struct wait_traits; #if defined(ASIO_HAS_BOOST_DATE_TIME) template struct time_traits; #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #if !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL) #define ASIO_BASIC_WAITABLE_TIMER_FWD_DECL template , typename Executor = executor> class basic_waitable_timer; #endif // !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL) #if defined(ASIO_HAS_CHRONO) typedef basic_waitable_timer system_timer; typedef basic_waitable_timer steady_timer; typedef basic_waitable_timer high_resolution_timer; #endif // defined(ASIO_HAS_CHRONO) #if !defined(ASIO_BASIC_SOCKET_FWD_DECL) #define ASIO_BASIC_SOCKET_FWD_DECL template class basic_socket; #endif // !defined(ASIO_BASIC_SOCKET_FWD_DECL) #if !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL) #define ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL template class basic_datagram_socket; #endif // !defined(ASIO_BASIC_DATAGRAM_SOCKET_FWD_DECL) #if !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL) #define ASIO_BASIC_STREAM_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. 
template class basic_stream_socket; #endif // !defined(ASIO_BASIC_STREAM_SOCKET_FWD_DECL) #if !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL) #define ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL template class basic_socket_acceptor; #endif // !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL) #if !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL) #define ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL // Forward declaration with defaulted arguments. template > #else typename Clock = chrono::steady_clock, typename WaitTraits = wait_traits > #endif class basic_socket_streambuf; #endif // !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL) #if !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL) #define ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL // Forward declaration with defaulted arguments. template > #else typename Clock = chrono::steady_clock, typename WaitTraits = wait_traits > #endif class basic_socket_iostream; #endif // !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL) namespace ip { class address; class address_v4; class address_v6; template class basic_address_iterator; typedef basic_address_iterator address_v4_iterator; typedef basic_address_iterator address_v6_iterator; template class basic_address_range; typedef basic_address_range address_v4_range; typedef basic_address_range address_v6_range; class network_v4; class network_v6; template class basic_endpoint; template class basic_resolver_entry; template class basic_resolver_results; #if !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL) #define ASIO_IP_BASIC_RESOLVER_FWD_DECL template class basic_resolver; #endif // !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL) class tcp; class udp; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(GENERATING_DOCUMENTATION) #endif // ASIO_TS_NETFWD_HPP galera-4-26.4.25/asio/asio/ts/timer.hpp000644 000164 177776 00000001254 15107057155 020663 0ustar00jenkinsnogroup000000 000000 // // ts/timer.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_TIMER_HPP #define ASIO_TS_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/chrono.hpp" #include "asio/wait_traits.hpp" #include "asio/basic_waitable_timer.hpp" #include "asio/system_timer.hpp" #include "asio/steady_timer.hpp" #include "asio/high_resolution_timer.hpp" #endif // ASIO_TS_TIMER_HPP galera-4-26.4.25/asio/asio/ts/socket.hpp000644 000164 177776 00000001407 15107057155 021033 0ustar00jenkinsnogroup000000 000000 // // ts/socket.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_SOCKET_HPP #define ASIO_TS_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/socket_base.hpp" #include "asio/basic_socket.hpp" #include "asio/basic_datagram_socket.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_streambuf.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/connect.hpp" #endif // ASIO_TS_SOCKET_HPP galera-4-26.4.25/asio/asio/ts/io_context.hpp000644 000164 177776 00000001014 15107057155 021710 0ustar00jenkinsnogroup000000 000000 // // ts/io_context.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_IO_CONTEXT_HPP #define ASIO_TS_IO_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/io_context.hpp" #endif // ASIO_TS_IO_CONTEXT_HPP galera-4-26.4.25/asio/asio/ts/internet.hpp000644 000164 177776 00000002341 15107057155 021371 0ustar00jenkinsnogroup000000 000000 // // ts/internet.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_INTERNET_HPP #define ASIO_TS_INTERNET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/ip/address.hpp" #include "asio/ip/address_v4.hpp" #include "asio/ip/address_v4_iterator.hpp" #include "asio/ip/address_v4_range.hpp" #include "asio/ip/address_v6.hpp" #include "asio/ip/address_v6_iterator.hpp" #include "asio/ip/address_v6_range.hpp" #include "asio/ip/bad_address_cast.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/basic_resolver_entry.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/host_name.hpp" #include "asio/ip/network_v4.hpp" #include "asio/ip/network_v6.hpp" #include "asio/ip/tcp.hpp" #include "asio/ip/udp.hpp" #include "asio/ip/v6_only.hpp" #include "asio/ip/unicast.hpp" #include "asio/ip/multicast.hpp" #endif // ASIO_TS_INTERNET_HPP galera-4-26.4.25/asio/asio/ts/executor.hpp000644 000164 177776 00000001722 15107057155 021401 0ustar00jenkinsnogroup000000 000000 // // ts/executor.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_EXECUTOR_HPP #define ASIO_TS_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/async_result.hpp" #include "asio/associated_allocator.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/associated_executor.hpp" #include "asio/bind_executor.hpp" #include "asio/executor_work_guard.hpp" #include "asio/system_executor.hpp" #include "asio/executor.hpp" #include "asio/dispatch.hpp" #include "asio/post.hpp" #include "asio/defer.hpp" #include "asio/strand.hpp" #include "asio/packaged_task.hpp" #include "asio/use_future.hpp" #endif // ASIO_TS_EXECUTOR_HPP galera-4-26.4.25/asio/asio/ts/buffer.hpp000644 000164 177776 00000001157 15107057155 021016 0ustar00jenkinsnogroup000000 000000 // // ts/buffer.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_TS_BUFFER_HPP #define ASIO_TS_BUFFER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/read.hpp" #include "asio/write.hpp" #include "asio/read_until.hpp" #endif // ASIO_TS_BUFFER_HPP galera-4-26.4.25/asio/asio/deadline_timer.hpp000644 000164 177776 00000002033 15107057155 022056 0ustar00jenkinsnogroup000000 000000 // // deadline_timer.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DEADLINE_TIMER_HPP #define ASIO_DEADLINE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/socket_types.hpp" // Must come before posix_time. #include "asio/basic_deadline_timer.hpp" #include namespace asio { /// Typedef for the typical usage of timer. Uses a UTC clock. typedef basic_deadline_timer deadline_timer; } // namespace asio #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_DEADLINE_TIMER_HPP galera-4-26.4.25/asio/asio/buffered_write_stream_fwd.hpp000644 000164 177776 00000001210 15107057155 024314 0ustar00jenkinsnogroup000000 000000 // // buffered_write_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_WRITE_STREAM_FWD_HPP #define ASIO_BUFFERED_WRITE_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_write_stream; } // namespace asio #endif // ASIO_BUFFERED_WRITE_STREAM_FWD_HPP galera-4-26.4.25/asio/asio/unyield.hpp000644 000164 177776 00000000575 15107057155 020573 0ustar00jenkinsnogroup000000 000000 // // unyield.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifdef reenter # undef reenter #endif #ifdef yield # undef yield #endif #ifdef fork # undef fork #endif galera-4-26.4.25/asio/asio/version.hpp000644 000164 177776 00000001204 15107057155 020575 0ustar00jenkinsnogroup000000 000000 // // version.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_VERSION_HPP #define ASIO_VERSION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) // ASIO_VERSION % 100 is the sub-minor version // ASIO_VERSION / 100 % 1000 is the minor version // ASIO_VERSION / 100000 is the major version #define ASIO_VERSION 101401 // 1.14.1 #endif // ASIO_VERSION_HPP galera-4-26.4.25/asio/asio/handler_invoke_hook.hpp000644 000164 177776 00000005154 15107057155 023130 0ustar00jenkinsnogroup000000 000000 // // handler_invoke_hook.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_INVOKE_HOOK_HPP #define ASIO_HANDLER_INVOKE_HOOK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { /** @defgroup asio_handler_invoke asio::asio_handler_invoke * * @brief Default invoke function for handlers. * * Completion handlers for asynchronous operations are invoked by the * io_context associated with the corresponding object (e.g. a socket or * deadline_timer). 
Certain guarantees are made on when the handler may be * invoked, in particular that a handler can only be invoked from a thread that * is currently calling @c run() on the corresponding io_context object. * Handlers may subsequently be invoked through other objects (such as * io_context::strand objects) that provide additional guarantees. * * When asynchronous operations are composed from other asynchronous * operations, all intermediate handlers should be invoked using the same * method as the final handler. This is required to ensure that user-defined * objects are not accessed in a way that may violate the guarantees. This * hooking function ensures that the invoked method used for the final handler * is accessible at each intermediate step. * * Implement asio_handler_invoke for your own handlers to specify a custom * invocation strategy. * * This default implementation invokes the function object like so: * @code function(); @endcode * If necessary, the default implementation makes a copy of the function object * so that the non-const operator() can be used. * * @par Example * @code * class my_handler; * * template * void asio_handler_invoke(Function function, my_handler* context) * { * context->strand_.dispatch(function); * } * @endcode */ /*@{*/ /// Default handler invocation hook used for non-const function objects. template inline void asio_handler_invoke(Function& function, ...) { function(); } /// Default handler invocation hook used for const function objects. template inline void asio_handler_invoke(const Function& function, ...) { Function tmp(function); tmp(); } /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_HANDLER_INVOKE_HOOK_HPP galera-4-26.4.25/asio/asio/ssl.hpp000644 000164 177776 00000001331 15107057155 017712 0ustar00jenkinsnogroup000000 000000 // // ssl.hpp // ~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_HPP #define ASIO_SSL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/ssl/context.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/rfc2818_verification.hpp" #include "asio/ssl/stream.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/verify_context.hpp" #include "asio/ssl/verify_mode.hpp" #endif // ASIO_SSL_HPP galera-4-26.4.25/asio/asio/detail/000755 000164 177776 00000000000 15107057160 017640 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/detail/gcc_sync_fenced_block.hpp000644 000164 177776 00000003076 15107057155 024631 0ustar00jenkinsnogroup000000 000000 // // detail/gcc_sync_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_sync_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. 
explicit gcc_sync_fenced_block(half_or_full_t) : value_(0) { __sync_lock_test_and_set(&value_, 1); } // Destructor. ~gcc_sync_fenced_block() { __sync_lock_release(&value_); } private: int value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) // && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) // && !defined(__INTEL_COMPILER) && !defined(__ICL) // && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) #endif // ASIO_DETAIL_GCC_SYNC_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/win_thread.hpp000644 000164 177776 00000005744 15107057155 022513 0ustar00jenkinsnogroup000000 000000 // // detail/win_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_THREAD_HPP #define ASIO_DETAIL_WIN_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_APP) \ && !defined(UNDER_CE) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) ASIO_DECL void __stdcall apc_function(ULONG data); #else ASIO_DECL void __stdcall apc_function(ULONG_PTR data); #endif template class win_thread_base { public: static bool terminate_threads() { return ::InterlockedExchangeAdd(&terminate_threads_, 0) != 0; } static void set_terminate_threads(bool b) { ::InterlockedExchange(&terminate_threads_, b ? 
1 : 0); } private: static long terminate_threads_; }; template long win_thread_base::terminate_threads_ = 0; class win_thread : private noncopyable, public win_thread_base { public: // Constructor. template win_thread(Function f, unsigned int stack_size = 0) : thread_(0), exit_event_(0) { start_thread(new func(f), stack_size); } // Destructor. ASIO_DECL ~win_thread(); // Wait for the thread to exit. ASIO_DECL void join(); // Get number of CPUs. ASIO_DECL static std::size_t hardware_concurrency(); private: friend ASIO_DECL unsigned int __stdcall win_thread_function(void* arg); #if defined(WINVER) && (WINVER < 0x0500) friend ASIO_DECL void __stdcall apc_function(ULONG); #else friend ASIO_DECL void __stdcall apc_function(ULONG_PTR); #endif class func_base { public: virtual ~func_base() {} virtual void run() = 0; ::HANDLE entry_event_; ::HANDLE exit_event_; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg, unsigned int stack_size); ::HANDLE thread_; ::HANDLE exit_event_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_APP) // && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_THREAD_HPP galera-4-26.4.25/asio/asio/detail/array_fwd.hpp000644 000164 177776 00000001622 15107057155 022334 0ustar00jenkinsnogroup000000 000000 // // detail/array_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_FWD_HPP #define ASIO_DETAIL_ARRAY_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { template class array; } // namespace boost // Standard library components can't be forward declared, so we'll have to // include the array header. Fortunately, it's fairly lightweight and doesn't // add significantly to the compile time. #if defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) #endif // ASIO_DETAIL_ARRAY_FWD_HPP galera-4-26.4.25/asio/asio/detail/descriptor_ops.hpp000644 000164 177776 00000006307 15107057155 023422 0ustar00jenkinsnogroup000000 000000 // // detail/descriptor_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_OPS_HPP #define ASIO_DETAIL_DESCRIPTOR_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include #include "asio/error.hpp" #include "asio/error_code.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { // Descriptor state bits. enum { // The user wants a non-blocking descriptor. user_set_non_blocking = 1, // The descriptor has been set non-blocking. internal_non_blocking = 2, // Helper "state" used to determine whether the descriptor is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // The descriptor may have been dup()-ed. 
possible_dup = 4 }; typedef unsigned char state_type; template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { ec = asio::error_code(errno, asio::error::get_system_category()); return return_value; } ASIO_DECL int open(const char* path, int flags, asio::error_code& ec); ASIO_DECL int close(int d, state_type& state, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec); typedef iovec buf; ASIO_DECL std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec); ASIO_DECL bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred); ASIO_DECL int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, asio::error_code& ec); ASIO_DECL int fcntl(int d, int cmd, long arg, asio::error_code& ec); ASIO_DECL int poll_read(int d, state_type state, asio::error_code& ec); ASIO_DECL int poll_write(int d, state_type state, asio::error_code& ec); ASIO_DECL int poll_error(int d, state_type state, asio::error_code& ec); } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/descriptor_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_OPS_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_service.hpp000644 000164 177776 
00000041636 15107057155 025261 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/socket_base.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_accept_op.hpp" #include "asio/detail/reactive_socket_connect_op.hpp" #include "asio/detail/reactive_socket_recvfrom_op.hpp" #include "asio/detail/reactive_socket_sendto_op.hpp" #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_service : public execution_context_service_base >, public reactive_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef socket_type native_handle_type; // The implementation type of the socket. struct implementation_type : reactive_socket_service_base::base_implementation_type { // Default constructor. 
implementation_type() : protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. reactive_socket_service(execution_context& context) : execution_context_service_base< reactive_socket_service >(context), reactive_socket_service_base(context) { } // Destroy all user-defined handler objects owned by the service. void shutdown() { this->base_shutdown(); } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, reactive_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, reactive_socket_service&, typename reactive_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) impl.protocol_ = protocol; return ec; } // Assign a native socket to a socket implementation. 
asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) impl.protocol_ = protocol; return ec; } // Get the native socket representation. native_handle_type native_handle(implementation_type& impl) { return impl.socket_; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. 
endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, false, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Disable sends or receives on the socket. asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_sendto_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, destination, flags, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_send_to")); start_op(impl, reactor::write_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_send_to(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. 
socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recvfrom_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; int protocol = impl.protocol_.type(); p.p = new (p.v) op(impl.socket_, protocol, buffers, sender_endpoint, flags, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive_from")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, true, false); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. 
sender_endpoint = endpoint_type(); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? &addr_len : 0, ec)); // On success, assign new connection to peer socket object. if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); peer.assign(impl.protocol_, new_socket.get(), ec); if (!ec) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects must be // valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, peer, impl.protocol_, peer_endpoint, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_accept")); start_accept_op(impl, p.p, is_continuation, peer.is_open()); p.v = p.p = 0; } #if defined(ASIO_HAS_MOVE) // Start an asynchronous accept. The peer_endpoint object must be valid until // the accept's handler is invoked. 
template void async_move_accept(implementation_type& impl, const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_move_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(peer_io_ex, impl.socket_, impl.state_, impl.protocol_, peer_endpoint, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_accept")); start_accept_op(impl, p.p, is_continuation, false); p.v = p.p = 0; } #endif // defined(ASIO_HAS_MOVE) // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_connect")); start_connect_op(impl, p.p, is_continuation, peer_endpoint.data(), peer_endpoint.size()); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/signal_handler.hpp000644 000164 177776 00000005265 15107057155 023337 0ustar00jenkinsnogroup000000 000000 // // detail/signal_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_HANDLER_HPP #define ASIO_DETAIL_SIGNAL_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_work.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_handler : public signal_op { public: ASIO_DEFINE_HANDLER_PTR(signal_handler); signal_handler(Handler& h, const IoExecutor& io_ex) : signal_op(&signal_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t 
/*bytes_transferred*/) { // Take ownership of the handler object. signal_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; handler_work w(h->handler_, h->io_executor_); ASIO_HANDLER_COMPLETION((*h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(h->handler_, h->ec_, h->signal_number_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_HANDLER_HPP galera-4-26.4.25/asio/asio/detail/descriptor_read_op.hpp000644 000164 177776 00000007721 15107057155 024233 0ustar00jenkinsnogroup000000 000000 // // detail/descriptor_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_work.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_read_op_base : public reactor_op { public: descriptor_read_op_base(int descriptor, const MutableBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_read_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static status do_perform(reactor_op* base) { descriptor_read_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); status result = descriptor_ops::non_blocking_read(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_) ? 
done : not_done; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_read", o->ec_, o->bytes_transferred_)); return result; } private: int descriptor_; MutableBufferSequence buffers_; }; template class descriptor_read_op : public descriptor_read_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_read_op); descriptor_read_op(int descriptor, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : descriptor_read_op_base( descriptor, buffers, &descriptor_read_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. descriptor_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP galera-4-26.4.25/asio/asio/detail/handler_type_requirements.hpp000644 000164 177776 00000044142 15107057155 025643 0ustar00jenkinsnogroup000000 000000 // // detail/handler_type_requirements.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #define ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" // Older versions of gcc have difficulty compiling the sizeof expressions where // we test the handler type requirements. We'll disable checking of handler type // requirements for those compilers, but otherwise enable it by default. #if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) # if !defined(__GNUC__) || (__GNUC__ >= 4) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS 1 # endif // !defined(__GNUC__) || (__GNUC__ >= 4) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) // With C++0x we can use a combination of enhanced SFINAE and static_assert to // generate better template error messages. As this technique is not yet widely // portable, we'll only enable it for tested compilers. 
#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # if defined(__clang__) # if __has_feature(__cxx_static_assert__) # define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1 # endif // __has_feature(cxx_static_assert) # endif // defined(__clang__) #endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS) #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # include "asio/async_result.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) namespace asio { namespace detail { #if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) # if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template auto zero_arg_copyable_handler_test(Handler h, void*) -> decltype( sizeof(Handler(static_cast(h))), ((h)()), char(0)); template char (&zero_arg_copyable_handler_test(Handler, ...))[2]; template auto one_arg_handler_test(Handler h, Arg1* a1) -> decltype( sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))), ((h)(*a1)), char(0)); template char (&one_arg_handler_test(Handler h, ...))[2]; template auto two_arg_handler_test(Handler h, Arg1* a1, Arg2* a2) -> decltype( sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))), ((h)(*a1, *a2)), char(0)); template char (&two_arg_handler_test(Handler, ...))[2]; template auto two_arg_move_handler_test(Handler h, Arg1* a1, Arg2* a2) -> decltype( sizeof(Handler(ASIO_MOVE_CAST(Handler)(h))), ((h)(*a1, ASIO_MOVE_CAST(Arg2)(*a2))), char(0)); template char (&two_arg_move_handler_test(Handler, ...))[2]; # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) \ 
static_assert(expr, msg); # else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) # define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) # endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT) template T& lvref(); template T& lvref(T); template const T& clvref(); template const T& clvref(T); #if defined(ASIO_HAS_MOVE) template T rvref(); template T rvref(T); #else // defined(ASIO_HAS_MOVE) template const T& rvref(); template const T& rvref(T); #endif // defined(ASIO_HAS_MOVE) template char argbyv(T); template struct handler_type_requirements { }; #define ASIO_LEGACY_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void()) asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::zero_arg_copyable_handler_test( \ asio::detail::clvref< \ asio_true_handler_type>(), 0)) == 1, \ "CompletionHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::clvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()(), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ReadHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ 
asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "WriteHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "AcceptHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \ handler_type, handler, socket_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, socket_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_move_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "MoveAcceptHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::rvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ 
void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_RANGE_CONNECT_HANDLER_CHECK( \ handler_type, handler, endpoint_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, endpoint_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "RangeConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, iter_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "IteratorConnectHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ 
handler_type, handler, range_type) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, range_type)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "ResolveHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "WaitHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, int)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "SignalHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ 
handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "HandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code, std::size_t)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::two_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0), \ static_cast(0))) == 1, \ "BufferedHandshakeHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref(), \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ \ typedef ASIO_HANDLER_TYPE(handler_type, \ void(asio::error_code)) \ asio_true_handler_type; \ \ ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \ sizeof(asio::detail::one_arg_handler_test( \ asio::detail::rvref< \ asio_true_handler_type>(), \ static_cast(0))) == 1, \ "ShutdownHandler type requirements not met") \ \ typedef asio::detail::handler_type_requirements< \ sizeof( \ asio::detail::argbyv( \ asio::detail::rvref< \ asio_true_handler_type>())) + \ sizeof( \ asio::detail::lvref< \ asio_true_handler_type>()( \ asio::detail::lvref()), \ char(0))> ASIO_UNUSED_TYPEDEF #else // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) #define 
ASIO_LEGACY_COMPLETION_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_READ_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WRITE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_ACCEPT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \ handler_type, handler, socket_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_CONNECT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_RANGE_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_RESOLVE_HANDLER_CHECK( \ handler_type, handler, iter_type) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_WAIT_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SIGNAL_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #define ASIO_SHUTDOWN_HANDLER_CHECK( \ handler_type, handler) \ typedef int ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP galera-4-26.4.25/asio/asio/detail/strand_service.hpp000644 000164 177776 00000010743 15107057155 023375 0ustar00jenkinsnogroup000000 000000 // // detail/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STRAND_SERVICE_HPP #define ASIO_DETAIL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/io_context.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Default service implementation for a strand. class strand_service : public asio::detail::service_base { private: // Helper class to re-post the strand on exit. struct on_do_complete_exit; // Helper class to re-post the strand on exit. struct on_dispatch_exit; public: // The underlying implementation of a strand. class strand_impl : public operation { public: strand_impl(); private: // Only this service will have access to the internal values. friend class strand_service; friend struct on_do_complete_exit; friend struct on_dispatch_exit; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Indicates whether the strand is currently "locked" by a handler. This // means that there is a handler upcall in progress, or that the strand // itself has been scheduled in order to invoke some pending handlers. bool locked_; // The handlers that are waiting on the strand but should not be run until // after the next time the strand is scheduled. This queue must only be // modified while the mutex is locked. op_queue waiting_queue_; // The handlers that are ready to be run. Logically speaking, these are the // handlers that hold the strand's lock. The ready queue is only modified // from within the strand and so may be accessed without locking the mutex. op_queue ready_queue_; }; typedef strand_impl* implementation_type; // Construct a new strand service for the specified io_context. 
ASIO_DECL explicit strand_service(asio::io_context& io_context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new strand implementation. ASIO_DECL void construct(implementation_type& impl); // Request the io_context to invoke the given handler. template void dispatch(implementation_type& impl, Handler& handler); // Request the io_context to invoke the given handler and return immediately. template void post(implementation_type& impl, Handler& handler); // Determine whether the strand is running in the current thread. ASIO_DECL bool running_in_this_thread( const implementation_type& impl) const; private: // Helper function to dispatch a handler. Returns true if the handler should // be dispatched immediately. ASIO_DECL bool do_dispatch(implementation_type& impl, operation* op); // Helper fiunction to post a handler. ASIO_DECL void do_post(implementation_type& impl, operation* op, bool is_continuation); ASIO_DECL static void do_complete(void* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); // The io_context implementation used to post completions. io_context_impl& io_context_; // Mutex to protect access to the array of implementations. asio::detail::mutex mutex_; // Number of implementations shared between all strand objects. #if defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = ASIO_STRAND_IMPLEMENTATIONS }; #else // defined(ASIO_STRAND_IMPLEMENTATIONS) enum { num_implementations = 193 }; #endif // defined(ASIO_STRAND_IMPLEMENTATIONS) // Pool of implementations. scoped_ptr implementations_[num_implementations]; // Extra value used when hashing to prevent recycled memory locations from // getting the same strand implementation. 
std::size_t salt_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/strand_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/strand_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_STRAND_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/object_pool.hpp000644 000164 177776 00000006541 15107057155 022662 0ustar00jenkinsnogroup000000 000000 // // detail/object_pool.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OBJECT_POOL_HPP #define ASIO_DETAIL_OBJECT_POOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class object_pool; class object_pool_access { public: template static Object* create() { return new Object; } template static Object* create(Arg arg) { return new Object(arg); } template static void destroy(Object* o) { delete o; } template static Object*& next(Object* o) { return o->next_; } template static Object*& prev(Object* o) { return o->prev_; } }; template class object_pool : private noncopyable { public: // Constructor. object_pool() : live_list_(0), free_list_(0) { } // Destructor destroys all objects. ~object_pool() { destroy_list(live_list_); destroy_list(free_list_); } // Get the object at the start of the live list. Object* first() { return live_list_; } // Allocate a new object. 
Object* alloc() { Object* o = free_list_; if (o) free_list_ = object_pool_access::next(free_list_); else o = object_pool_access::create(); object_pool_access::next(o) = live_list_; object_pool_access::prev(o) = 0; if (live_list_) object_pool_access::prev(live_list_) = o; live_list_ = o; return o; } // Allocate a new object with an argument. template Object* alloc(Arg arg) { Object* o = free_list_; if (o) free_list_ = object_pool_access::next(free_list_); else o = object_pool_access::create(arg); object_pool_access::next(o) = live_list_; object_pool_access::prev(o) = 0; if (live_list_) object_pool_access::prev(live_list_) = o; live_list_ = o; return o; } // Free an object. Moves it to the free list. No destructors are run. void free(Object* o) { if (live_list_ == o) live_list_ = object_pool_access::next(o); if (object_pool_access::prev(o)) { object_pool_access::next(object_pool_access::prev(o)) = object_pool_access::next(o); } if (object_pool_access::next(o)) { object_pool_access::prev(object_pool_access::next(o)) = object_pool_access::prev(o); } object_pool_access::next(o) = free_list_; object_pool_access::prev(o) = 0; free_list_ = o; } private: // Helper function to destroy all elements in a list. void destroy_list(Object* list) { while (list) { Object* o = list; list = object_pool_access::next(o); object_pool_access::destroy(o); } } // The list of live objects. Object* live_list_; // The free list. Object* free_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OBJECT_POOL_HPP galera-4-26.4.25/asio/asio/detail/old_win_sdk_compat.hpp000644 000164 177776 00000010425 15107057155 024216 0ustar00jenkinsnogroup000000 000000 // // detail/old_win_sdk_compat.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #define ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Guess whether we are building against on old Platform SDK. #if !defined(IN6ADDR_ANY_INIT) #define ASIO_HAS_OLD_WIN_SDK 1 #endif // !defined(IN6ADDR_ANY_INIT) #if defined(ASIO_HAS_OLD_WIN_SDK) // Emulation of types that are missing from old Platform SDKs. // // N.B. this emulation is also used if building for a Windows 2000 target with // a recent (i.e. Vista or later) SDK, as the SDK does not provide IPv6 support // in that case. #include "asio/detail/push_options.hpp" namespace asio { namespace detail { enum { sockaddr_storage_maxsize = 128, // Maximum size. sockaddr_storage_alignsize = (sizeof(__int64)), // Desired alignment. 
sockaddr_storage_pad1size = (sockaddr_storage_alignsize - sizeof(short)), sockaddr_storage_pad2size = (sockaddr_storage_maxsize - (sizeof(short) + sockaddr_storage_pad1size + sockaddr_storage_alignsize)) }; struct sockaddr_storage_emulation { short ss_family; char __ss_pad1[sockaddr_storage_pad1size]; __int64 __ss_align; char __ss_pad2[sockaddr_storage_pad2size]; }; struct in6_addr_emulation { union { u_char Byte[16]; u_short Word[8]; } u; }; #if !defined(s6_addr) # define _S6_un u # define _S6_u8 Byte # define s6_addr _S6_un._S6_u8 #endif // !defined(s6_addr) struct sockaddr_in6_emulation { short sin6_family; u_short sin6_port; u_long sin6_flowinfo; in6_addr_emulation sin6_addr; u_long sin6_scope_id; }; struct ipv6_mreq_emulation { in6_addr_emulation ipv6mr_multiaddr; unsigned int ipv6mr_interface; }; struct addrinfo_emulation { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; size_t ai_addrlen; char* ai_canonname; sockaddr* ai_addr; addrinfo_emulation* ai_next; }; #if !defined(AI_PASSIVE) # define AI_PASSIVE 0x1 #endif #if !defined(AI_CANONNAME) # define AI_CANONNAME 0x2 #endif #if !defined(AI_NUMERICHOST) # define AI_NUMERICHOST 0x4 #endif #if !defined(EAI_AGAIN) # define EAI_AGAIN WSATRY_AGAIN #endif #if !defined(EAI_BADFLAGS) # define EAI_BADFLAGS WSAEINVAL #endif #if !defined(EAI_FAIL) # define EAI_FAIL WSANO_RECOVERY #endif #if !defined(EAI_FAMILY) # define EAI_FAMILY WSAEAFNOSUPPORT #endif #if !defined(EAI_MEMORY) # define EAI_MEMORY WSA_NOT_ENOUGH_MEMORY #endif #if !defined(EAI_NODATA) # define EAI_NODATA WSANO_DATA #endif #if !defined(EAI_NONAME) # define EAI_NONAME WSAHOST_NOT_FOUND #endif #if !defined(EAI_SERVICE) # define EAI_SERVICE WSATYPE_NOT_FOUND #endif #if !defined(EAI_SOCKTYPE) # define EAI_SOCKTYPE WSAESOCKTNOSUPPORT #endif #if !defined(NI_NOFQDN) # define NI_NOFQDN 0x01 #endif #if !defined(NI_NUMERICHOST) # define NI_NUMERICHOST 0x02 #endif #if !defined(NI_NAMEREQD) # define NI_NAMEREQD 0x04 #endif #if !defined(NI_NUMERICSERV) # 
define NI_NUMERICSERV 0x08 #endif #if !defined(NI_DGRAM) # define NI_DGRAM 0x10 #endif #if !defined(IPPROTO_IPV6) # define IPPROTO_IPV6 41 #endif #if !defined(IPV6_UNICAST_HOPS) # define IPV6_UNICAST_HOPS 4 #endif #if !defined(IPV6_MULTICAST_IF) # define IPV6_MULTICAST_IF 9 #endif #if !defined(IPV6_MULTICAST_HOPS) # define IPV6_MULTICAST_HOPS 10 #endif #if !defined(IPV6_MULTICAST_LOOP) # define IPV6_MULTICAST_LOOP 11 #endif #if !defined(IPV6_JOIN_GROUP) # define IPV6_JOIN_GROUP 12 #endif #if !defined(IPV6_LEAVE_GROUP) # define IPV6_LEAVE_GROUP 13 #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_OLD_WIN_SDK) // Even newer Platform SDKs that support IPv6 may not define IPV6_V6ONLY. #if !defined(IPV6_V6ONLY) # define IPV6_V6ONLY 27 #endif // Some SDKs (e.g. Windows CE) don't define IPPROTO_ICMPV6. #if !defined(IPPROTO_ICMPV6) # define IPPROTO_ICMPV6 58 #endif #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_OLD_WIN_SDK_COMPAT_HPP galera-4-26.4.25/asio/asio/detail/op_queue.hpp000644 000164 177776 00000006246 15107057155 022207 0ustar00jenkinsnogroup000000 000000 // // detail/op_queue.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OP_QUEUE_HPP #define ASIO_DETAIL_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class op_queue; class op_queue_access { public: template static Operation* next(Operation* o) { return static_cast(o->next_); } template static void next(Operation1*& o1, Operation2* o2) { o1->next_ = o2; } template static void destroy(Operation* o) { o->destroy(); } template static Operation*& front(op_queue& q) { return q.front_; } template static Operation*& back(op_queue& q) { return q.back_; } }; template class op_queue : private noncopyable { public: // Constructor. op_queue() : front_(0), back_(0) { } // Destructor destroys all operations. ~op_queue() { while (Operation* op = front_) { pop(); op_queue_access::destroy(op); } } // Get the operation at the front of the queue. Operation* front() { return front_; } // Pop an operation from the front of the queue. void pop() { if (front_) { Operation* tmp = front_; front_ = op_queue_access::next(front_); if (front_ == 0) back_ = 0; op_queue_access::next(tmp, static_cast(0)); } } // Push an operation on to the back of the queue. void push(Operation* h) { op_queue_access::next(h, static_cast(0)); if (back_) { op_queue_access::next(back_, h); back_ = h; } else { front_ = back_ = h; } } // Push all operations from another queue on to the back of the queue. The // source queue may contain operations of a derived type. template void push(op_queue& q) { if (Operation* other_front = op_queue_access::front(q)) { if (back_) op_queue_access::next(back_, other_front); else front_ = other_front; back_ = op_queue_access::back(q); op_queue_access::front(q) = 0; op_queue_access::back(q) = 0; } } // Whether the queue is empty. 
bool empty() const { return front_ == 0; } // Test whether an operation is already enqueued. bool is_enqueued(Operation* o) const { return op_queue_access::next(o) != 0 || back_ == o; } private: friend class op_queue_access; // The front of the queue. Operation* front_; // The back of the queue. Operation* back_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_OP_QUEUE_HPP galera-4-26.4.25/asio/asio/detail/mutex.hpp000644 000164 177776 00000002300 15107057155 021512 0ustar00jenkinsnogroup000000 000000 // // detail/mutex.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MUTEX_HPP #define ASIO_DETAIL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_mutex.hpp" #else # error Only Windows, POSIX and std::mutex are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_mutex mutex; #elif defined(ASIO_WINDOWS) typedef win_mutex mutex; #elif defined(ASIO_HAS_PTHREADS) typedef posix_mutex mutex; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_mutex mutex; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/macos_fenced_block.hpp000644 000164 177776 00000002423 15107057155 024136 0ustar00jenkinsnogroup000000 000000 // // detail/macos_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #define ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__MACH__) && defined(__APPLE__) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class macos_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit macos_fenced_block(half_t) { } // Constructor for a full fenced block. explicit macos_fenced_block(full_t) { OSMemoryBarrier(); } // Destructor. ~macos_fenced_block() { OSMemoryBarrier(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__MACH__) && defined(__APPLE__) #endif // ASIO_DETAIL_MACOS_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/timer_queue_set.hpp000644 000164 177776 00000003172 15107057155 023557 0ustar00jenkinsnogroup000000 000000 // // detail/timer_queue_set.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_SET_HPP #define ASIO_DETAIL_TIMER_QUEUE_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_set { public: // Constructor. ASIO_DECL timer_queue_set(); // Add a timer queue to the set. ASIO_DECL void insert(timer_queue_base* q); // Remove a timer queue from the set. ASIO_DECL void erase(timer_queue_base* q); // Determine whether all queues are empty. ASIO_DECL bool all_empty() const; // Get the wait duration in milliseconds. ASIO_DECL long wait_duration_msec(long max_duration) const; // Get the wait duration in microseconds. ASIO_DECL long wait_duration_usec(long max_duration) const; // Dequeue all ready timers. ASIO_DECL void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL void get_all_timers(op_queue& ops); private: timer_queue_base* first_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_set.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_TIMER_QUEUE_SET_HPP galera-4-26.4.25/asio/asio/detail/keyword_tss_ptr.hpp000644 000164 177776 00000002450 15107057155 023620 0ustar00jenkinsnogroup000000 000000 // // detail/keyword_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #define ASIO_DETAIL_KEYWORD_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class keyword_tss_ptr : private noncopyable { public: // Constructor. keyword_tss_ptr() { } // Destructor. ~keyword_tss_ptr() { } // Get the value. operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: static ASIO_THREAD_KEYWORD T* value_; }; template ASIO_THREAD_KEYWORD T* keyword_tss_ptr::value_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) #endif // ASIO_DETAIL_KEYWORD_TSS_PTR_HPP galera-4-26.4.25/asio/asio/detail/handler_invoke_helpers.hpp000644 000164 177776 00000003147 15107057155 025074 0ustar00jenkinsnogroup000000 000000 // // detail/handler_invoke_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #define ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/memory.hpp" #include "asio/handler_invoke_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_invoke must be made from a namespace that does not // contain overloads of this function. 
The asio_handler_invoke_helpers // namespace is defined here for that purpose. namespace asio_handler_invoke_helpers { template inline void invoke(Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } template inline void invoke(const Function& function, Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) Function tmp(function); tmp(); #else using asio::asio_handler_invoke; asio_handler_invoke(function, asio::detail::addressof(context)); #endif } } // namespace asio_handler_invoke_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_INVOKE_HELPERS_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_thread_info.hpp000644 000164 177776 00000001474 15107057155 024534 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #define ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_thread_info : public thread_info_base { }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WIN_IOCP_THREAD_INFO_HPP galera-4-26.4.25/asio/asio/detail/is_buffer_sequence.hpp000644 000164 177776 00000017165 15107057155 024223 0ustar00jenkinsnogroup000000 000000 // // detail/is_buffer_sequence.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP #define ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { class mutable_buffer; class const_buffer; namespace detail { struct buffer_sequence_memfns_base { void begin(); void end(); void size(); void max_size(); void capacity(); void data(); void prepare(); void commit(); void consume(); void grow(); void shrink(); }; template struct buffer_sequence_memfns_derived : T, buffer_sequence_memfns_base { }; template struct buffer_sequence_memfns_check { }; #if defined(ASIO_HAS_DECLTYPE) template char buffer_sequence_begin_helper(...); template char (&buffer_sequence_begin_helper(T* t, typename enable_if::value>::type*))[2]; #else // defined(ASIO_HAS_DECLTYPE) template char (&buffer_sequence_begin_helper(...))[2]; template char buffer_sequence_begin_helper(T* t, buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::begin>*); #endif // defined(ASIO_HAS_DECLTYPE) #if defined(ASIO_HAS_DECLTYPE) template char buffer_sequence_end_helper(...); template char (&buffer_sequence_end_helper(T* t, typename enable_if::value>::type*))[2]; #else // defined(ASIO_HAS_DECLTYPE) template char (&buffer_sequence_end_helper(...))[2]; template char buffer_sequence_end_helper(T* t, buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::end>*); #endif // defined(ASIO_HAS_DECLTYPE) template char (&size_memfn_helper(...))[2]; template char size_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), 
&buffer_sequence_memfns_derived::size>*); template char (&max_size_memfn_helper(...))[2]; template char max_size_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::max_size>*); template char (&capacity_memfn_helper(...))[2]; template char capacity_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::capacity>*); template char (&data_memfn_helper(...))[2]; template char data_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::data>*); template char (&prepare_memfn_helper(...))[2]; template char prepare_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::prepare>*); template char (&commit_memfn_helper(...))[2]; template char commit_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::commit>*); template char (&consume_memfn_helper(...))[2]; template char consume_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::consume>*); template char (&grow_memfn_helper(...))[2]; template char grow_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::grow>*); template char (&shrink_memfn_helper(...))[2]; template char shrink_memfn_helper( buffer_sequence_memfns_check< void (buffer_sequence_memfns_base::*)(), &buffer_sequence_memfns_derived::shrink>*); template char (&buffer_sequence_element_type_helper(...))[2]; #if defined(ASIO_HAS_DECLTYPE) template char buffer_sequence_element_type_helper(T* t, typename enable_if::value>::type*); #else // defined(ASIO_HAS_DECLTYPE) template char buffer_sequence_element_type_helper( typename T::const_iterator*, typename enable_if::value>::type*); #endif // defined(ASIO_HAS_DECLTYPE) template 
char (&const_buffers_type_typedef_helper(...))[2]; template char const_buffers_type_typedef_helper( typename T::const_buffers_type*); template char (&mutable_buffers_type_typedef_helper(...))[2]; template char mutable_buffers_type_typedef_helper( typename T::mutable_buffers_type*); template struct is_buffer_sequence_class : integral_constant(0, 0)) != 1 && sizeof(buffer_sequence_end_helper(0, 0)) != 1 && sizeof(buffer_sequence_element_type_helper(0, 0)) == 1> { }; template struct is_buffer_sequence : conditional::value, is_buffer_sequence_class, false_type>::type { }; template <> struct is_buffer_sequence : true_type { }; template <> struct is_buffer_sequence : true_type { }; template <> struct is_buffer_sequence : true_type { }; template <> struct is_buffer_sequence : false_type { }; template struct is_dynamic_buffer_class_v1 : integral_constant(0)) != 1 && sizeof(max_size_memfn_helper(0)) != 1 && sizeof(capacity_memfn_helper(0)) != 1 && sizeof(data_memfn_helper(0)) != 1 && sizeof(consume_memfn_helper(0)) != 1 && sizeof(prepare_memfn_helper(0)) != 1 && sizeof(commit_memfn_helper(0)) != 1 && sizeof(const_buffers_type_typedef_helper(0)) == 1 && sizeof(mutable_buffers_type_typedef_helper(0)) == 1> { }; template struct is_dynamic_buffer_v1 : conditional::value, is_dynamic_buffer_class_v1, false_type>::type { }; template struct is_dynamic_buffer_class_v2 : integral_constant(0)) != 1 && sizeof(max_size_memfn_helper(0)) != 1 && sizeof(capacity_memfn_helper(0)) != 1 && sizeof(data_memfn_helper(0)) != 1 && sizeof(consume_memfn_helper(0)) != 1 && sizeof(grow_memfn_helper(0)) != 1 && sizeof(shrink_memfn_helper(0)) != 1 && sizeof(const_buffers_type_typedef_helper(0)) == 1 && sizeof(mutable_buffers_type_typedef_helper(0)) == 1> { }; template struct is_dynamic_buffer_v2 : conditional::value, is_dynamic_buffer_class_v2, false_type>::type { }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IS_BUFFER_SEQUENCE_HPP 
galera-4-26.4.25/asio/asio/detail/memory.hpp000644 000164 177776 00000003476 15107057155 021677 0ustar00jenkinsnogroup000000 000000 // // detail/memory.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_MEMORY_HPP #define ASIO_DETAIL_MEMORY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #if !defined(ASIO_HAS_STD_SHARED_PTR) # include # include #endif // !defined(ASIO_HAS_STD_SHARED_PTR) #if !defined(ASIO_HAS_STD_ADDRESSOF) # include #endif // !defined(ASIO_HAS_STD_ADDRESSOF) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_SHARED_PTR) using std::shared_ptr; using std::weak_ptr; #else // defined(ASIO_HAS_STD_SHARED_PTR) using boost::shared_ptr; using boost::weak_ptr; #endif // defined(ASIO_HAS_STD_SHARED_PTR) #if defined(ASIO_HAS_STD_ADDRESSOF) using std::addressof; #else // defined(ASIO_HAS_STD_ADDRESSOF) using boost::addressof; #endif // defined(ASIO_HAS_STD_ADDRESSOF) } // namespace detail #if defined(ASIO_HAS_CXX11_ALLOCATORS) using std::allocator_arg_t; # define ASIO_USES_ALLOCATOR(t) \ namespace std { \ template \ struct uses_allocator : true_type {}; \ } \ /**/ # define ASIO_REBIND_ALLOC(alloc, t) \ typename std::allocator_traits::template rebind_alloc /**/ #else // defined(ASIO_HAS_CXX11_ALLOCATORS) struct allocator_arg_t {}; # define ASIO_USES_ALLOCATOR(t) # define ASIO_REBIND_ALLOC(alloc, t) \ typename alloc::template rebind::other /**/ #endif // defined(ASIO_HAS_CXX11_ALLOCATORS) } // namespace asio #endif // ASIO_DETAIL_MEMORY_HPP galera-4-26.4.25/asio/asio/detail/std_static_mutex.hpp000644 000164 177776 00000002702 15107057155 023741 0ustar00jenkinsnogroup000000 000000 // // detail/std_static_mutex.hpp // 
~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_STATIC_MUTEX_HPP #define ASIO_DETAIL_STD_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_static_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_static_mutex(int) { } // Destructor. ~std_static_mutex() { } // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; #define ASIO_STD_STATIC_MUTEX_INIT 0 } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_STATIC_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/resolve_op.hpp000644 000164 177776 00000001742 15107057155 022536 0ustar00jenkinsnogroup000000 000000 // // detail/resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVE_OP_HPP #define ASIO_DETAIL_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolve_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: resolve_op(func_type complete_func) : operation(complete_func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVE_OP_HPP galera-4-26.4.25/asio/asio/detail/scheduler_operation.hpp000644 000164 177776 00000003410 15107057155 024411 0ustar00jenkinsnogroup000000 000000 // // detail/scheduler_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCHEDULER_OPERATION_HPP #define ASIO_DETAIL_SCHEDULER_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/error_code.hpp" #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class scheduler; // Base class for all operations. A function pointer is used instead of virtual // functions to avoid the associated overhead. 
class scheduler_operation ASIO_INHERIT_TRACKED_HANDLER { public: typedef scheduler_operation operation_type; void complete(void* owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)(void*, scheduler_operation*, const asio::error_code&, std::size_t); scheduler_operation(func_type func) : next_(0), func_(func), task_result_(0) { } // Prevents deletion through this type. ~scheduler_operation() { } private: friend class op_queue_access; scheduler_operation* next_; func_type func_; protected: friend class scheduler; unsigned int task_result_; // Passed into bytes transferred. }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCHEDULER_OPERATION_HPP galera-4-26.4.25/asio/asio/detail/resolver_service.hpp000644 000164 177776 00000011004 15107057155 023732 0ustar00jenkinsnogroup000000 000000 // // detail/resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/detail/concurrency_hint.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/resolve_endpoint_op.hpp" #include "asio/detail/resolve_query_op.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolver_service : public execution_context_service_base >, public resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The results type. typedef asio::ip::basic_resolver_results results_type; // Constructor. resolver_service(execution_context& context) : execution_context_service_base >(context), resolver_service_base(context) { } // Destroy all user-defined handler objects owned by the service. void shutdown() { this->base_shutdown(); } // Perform any fork-related housekeeping. void notify_fork(execution_context::fork_event fork_ev) { this->base_notify_fork(fork_ev); } // Resolve a query to a list of entries. 
results_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { asio::detail::addrinfo_type* address_info = 0; socket_ops::getaddrinfo(query.host_name().c_str(), query.service_name().c_str(), query.hints(), &address_info, ec); auto_addrinfo auto_address_info(address_info); return ec ? results_type() : results_type::create( address_info, query.host_name(), query.service_name()); } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type& impl, const query_type& query, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef resolve_query_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl, query, scheduler_, handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "resolver", &impl, 0, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } // Resolve an endpoint to a list of entries. results_type resolve(implementation_type&, const endpoint_type& endpoint, asio::error_code& ec) { char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::sync_getnameinfo(endpoint.data(), endpoint.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, endpoint.protocol().type(), ec); return ec ? results_type() : results_type::create( endpoint, host_name, service_name); } // Asynchronously resolve an endpoint to a list of entries. template void async_resolve(implementation_type& impl, const endpoint_type& endpoint, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef resolve_endpoint_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl, endpoint, scheduler_, handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "resolver", &impl, 0, "async_resolve")); start_resolve_op(p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_RESOLVER_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/thread_info_base.hpp000644 000164 177776 00000006031 15107057155 023631 0ustar00jenkinsnogroup000000 000000 // // detail/thread_info_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_INFO_BASE_HPP #define ASIO_DETAIL_THREAD_INFO_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class thread_info_base : private noncopyable { public: struct default_tag { enum { mem_index = 0 }; }; struct awaitable_frame_tag { enum { mem_index = 1 }; }; struct executor_function_tag { enum { mem_index = 2 }; }; thread_info_base() { for (int i = 0; i < max_mem_index; ++i) reusable_memory_[i] = 0; } ~thread_info_base() { for (int i = 0; i < max_mem_index; ++i) if (reusable_memory_[i]) ::operator delete(reusable_memory_[i]); } static void* allocate(thread_info_base* this_thread, std::size_t size) { return allocate(default_tag(), this_thread, size); } static void deallocate(thread_info_base* this_thread, void* pointer, std::size_t size) { deallocate(default_tag(), this_thread, pointer, size); } template static void* 
allocate(Purpose, thread_info_base* this_thread, std::size_t size) { std::size_t chunks = (size + chunk_size - 1) / chunk_size; if (this_thread && this_thread->reusable_memory_[Purpose::mem_index]) { void* const pointer = this_thread->reusable_memory_[Purpose::mem_index]; this_thread->reusable_memory_[Purpose::mem_index] = 0; unsigned char* const mem = static_cast(pointer); if (static_cast(mem[0]) >= chunks) { mem[size] = mem[0]; return pointer; } ::operator delete(pointer); } void* const pointer = ::operator new(chunks * chunk_size + 1); unsigned char* const mem = static_cast(pointer); mem[size] = (chunks <= UCHAR_MAX) ? static_cast(chunks) : 0; return pointer; } template static void deallocate(Purpose, thread_info_base* this_thread, void* pointer, std::size_t size) { if (size <= chunk_size * UCHAR_MAX) { if (this_thread && this_thread->reusable_memory_[Purpose::mem_index] == 0) { unsigned char* const mem = static_cast(pointer); mem[0] = mem[size]; this_thread->reusable_memory_[Purpose::mem_index] = pointer; return; } } ::operator delete(pointer); } private: enum { chunk_size = 4 }; enum { max_mem_index = 3 }; void* reusable_memory_[max_mem_index]; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_THREAD_INFO_BASE_HPP galera-4-26.4.25/asio/asio/detail/null_thread.hpp000644 000164 177776 00000002453 15107057155 022662 0ustar00jenkinsnogroup000000 000000 // // detail/null_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_THREAD_HPP #define ASIO_DETAIL_NULL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_thread : private noncopyable { public: // Constructor. template null_thread(Function, unsigned int = 0) { asio::detail::throw_error( asio::error::operation_not_supported, "thread"); } // Destructor. ~null_thread() { } // Wait for the thread to exit. void join() { } // Get number of CPUs. static std::size_t hardware_concurrency() { return 1; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_THREAD_HPP galera-4-26.4.25/asio/asio/detail/static_mutex.hpp000644 000164 177776 00000003032 15107057155 023064 0ustar00jenkinsnogroup000000 000000 // // detail/static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STATIC_MUTEX_HPP #define ASIO_DETAIL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_static_mutex.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_static_mutex.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_static_mutex.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_static_mutex.hpp" #else # error Only Windows and POSIX are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_NULL_STATIC_MUTEX_INIT #elif defined(ASIO_WINDOWS) typedef win_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_WIN_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_PTHREADS) typedef posix_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_POSIX_STATIC_MUTEX_INIT #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_static_mutex static_mutex; # define ASIO_STATIC_MUTEX_INIT ASIO_STD_STATIC_MUTEX_INIT #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_STATIC_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/executor_function.hpp000644 000164 177776 00000004766 15107057155 024135 0ustar00jenkinsnogroup000000 000000 // // detail/executor_function.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EXECUTOR_FUNCTION_HPP #define ASIO_DETAIL_EXECUTOR_FUNCTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class executor_function_base { public: void complete() { func_(this, true); } void destroy() { func_(this, false); } protected: typedef void (*func_type)(executor_function_base*, bool); executor_function_base(func_type func) : func_(func) { } // Prevents deletion through this type. ~executor_function_base() { } private: func_type func_; }; template class executor_function : public executor_function_base { public: ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR( thread_info_base::executor_function_tag, executor_function); template executor_function(ASIO_MOVE_ARG(F) f, const Alloc& allocator) : executor_function_base(&executor_function::do_complete), function_(ASIO_MOVE_CAST(F)(f)), allocator_(allocator) { } static void do_complete(executor_function_base* base, bool call) { // Take ownership of the function object. executor_function* o(static_cast(base)); Alloc allocator(o->allocator_); ptr p = { detail::addressof(allocator), o, o }; // Make a copy of the function so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the function may be the true owner of the memory // associated with the function. Consequently, a local copy of the function // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. Function function(ASIO_MOVE_CAST(Function)(o->function_)); p.reset(); // Make the upcall if required. 
if (call) { function(); } } private: Function function_; Alloc allocator_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_EXECUTOR_FUNCTION_HPP galera-4-26.4.25/asio/asio/detail/noncopyable.hpp000644 000164 177776 00000001626 15107057155 022673 0ustar00jenkinsnogroup000000 000000 // // detail/noncopyable.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NONCOPYABLE_HPP #define ASIO_DETAIL_NONCOPYABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class noncopyable { protected: noncopyable() {} ~noncopyable() {} private: noncopyable(const noncopyable&); const noncopyable& operator=(const noncopyable&); }; } // namespace detail using asio::detail::noncopyable; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NONCOPYABLE_HPP galera-4-26.4.25/asio/asio/detail/base_from_completion_cond.hpp000644 000164 177776 00000003232 15107057155 025546 0ustar00jenkinsnogroup000000 000000 // // detail/base_from_completion_cond.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #define ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class base_from_completion_cond { protected: explicit base_from_completion_cond(CompletionCondition& completion_condition) : completion_condition_( ASIO_MOVE_CAST(CompletionCondition)(completion_condition)) { } std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return detail::adapt_completion_condition_result( completion_condition_(ec, total_transferred)); } private: CompletionCondition completion_condition_; }; template <> class base_from_completion_cond { protected: explicit base_from_completion_cond(transfer_all_t) { } static std::size_t check_for_completion( const asio::error_code& ec, std::size_t total_transferred) { return transfer_all_t()(ec, total_transferred); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP galera-4-26.4.25/asio/asio/detail/handler_cont_helpers.hpp000644 000164 177776 00000002404 15107057155 024537 0ustar00jenkinsnogroup000000 000000 // // detail/handler_cont_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #define ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/memory.hpp" #include "asio/handler_continuation_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_is_continuation must be made from a namespace that // does not contain overloads of this function. This namespace is defined here // for that purpose. namespace asio_handler_cont_helpers { template inline bool is_continuation(Context& context) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return false; #else using asio::asio_handler_is_continuation; return asio_handler_is_continuation( asio::detail::addressof(context)); #endif } } // namespace asio_handler_cont_helpers #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP galera-4-26.4.25/asio/asio/detail/scoped_lock.hpp000644 000164 177776 00000003513 15107057155 022644 0ustar00jenkinsnogroup000000 000000 // // detail/scoped_lock.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_LOCK_HPP #define ASIO_DETAIL_SCOPED_LOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to lock and unlock a mutex automatically. template class scoped_lock : private noncopyable { public: // Tag type used to distinguish constructors. enum adopt_lock_t { adopt_lock }; // Constructor adopts a lock that is already held. 
scoped_lock(Mutex& m, adopt_lock_t) : mutex_(m), locked_(true) { } // Constructor acquires the lock. explicit scoped_lock(Mutex& m) : mutex_(m) { mutex_.lock(); locked_ = true; } // Destructor releases the lock. ~scoped_lock() { if (locked_) mutex_.unlock(); } // Explicitly acquire the lock. void lock() { if (!locked_) { mutex_.lock(); locked_ = true; } } // Explicitly release the lock. void unlock() { if (locked_) { mutex_.unlock(); locked_ = false; } } // Test whether the lock is held. bool locked() const { return locked_; } // Get the underlying mutex. Mutex& mutex() { return mutex_; } private: // The underlying mutex. Mutex& mutex_; // Whether the mutex is currently locked or unlocked. bool locked_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_LOCK_HPP galera-4-26.4.25/asio/asio/detail/wait_handler.hpp000644 000164 177776 00000005104 15107057155 023016 0ustar00jenkinsnogroup000000 000000 // // detail/wait_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_HANDLER_HPP #define ASIO_DETAIL_WAIT_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/handler_work.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class wait_handler : public wait_op { public: ASIO_DEFINE_HANDLER_PTR(wait_handler); wait_handler(Handler& h, const IoExecutor& ex) : wait_op(&wait_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)), io_executor_(ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. wait_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; handler_work w(h->handler_, h->io_executor_); ASIO_HANDLER_COMPLETION((*h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(h->handler_, h->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_HANDLER_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_sendto_op.hpp000644 000164 177776 00000010473 15107057155 025606 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_sendto_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_sendto_op_base : public reactor_op { public: reactive_socket_sendto_op_base(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_sendto_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), destination_(endpoint), flags_(flags) { } static status do_perform(reactor_op* base) { reactive_socket_sendto_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); status result = socket_ops::non_blocking_sendto(o->socket_, bufs.buffers(), 
bufs.count(), o->flags_, o->destination_.data(), o->destination_.size(), o->ec_, o->bytes_transferred_) ? done : not_done; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_sendto", o->ec_, o->bytes_transferred_)); return result; } private: socket_type socket_; ConstBufferSequence buffers_; Endpoint destination_; socket_base::message_flags flags_; }; template class reactive_socket_sendto_op : public reactive_socket_sendto_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_sendto_op); reactive_socket_sendto_op(socket_type socket, const ConstBufferSequence& buffers, const Endpoint& endpoint, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) : reactive_socket_sendto_op_base(socket, buffers, endpoint, flags, &reactive_socket_sendto_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_sendto_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SENDTO_OP_HPP galera-4-26.4.25/asio/asio/detail/handler_alloc_helpers.hpp000644 000164 177776 00000013464 15107057155 024676 0ustar00jenkinsnogroup000000 000000 // // detail/handler_alloc_helpers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #define ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/associated_allocator.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/detail/push_options.hpp" // Calls to asio_handler_allocate and asio_handler_deallocate must be made from // a namespace that does not contain any overloads of these functions. The // asio_handler_alloc_helpers namespace is defined here for that purpose. 
namespace asio_handler_alloc_helpers { template inline void* allocate(std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) return ::operator new(s); #else using asio::asio_handler_allocate; return asio_handler_allocate(s, asio::detail::addressof(h)); #endif } template inline void deallocate(void* p, std::size_t s, Handler& h) { #if !defined(ASIO_HAS_HANDLER_HOOKS) ::operator delete(p); #else using asio::asio_handler_deallocate; asio_handler_deallocate(p, s, asio::detail::addressof(h)); #endif } } // namespace asio_handler_alloc_helpers namespace asio { namespace detail { template class hook_allocator { public: typedef T value_type; template struct rebind { typedef hook_allocator other; }; explicit hook_allocator(Handler& h) : handler_(h) { } template hook_allocator(const hook_allocator& a) : handler_(a.handler_) { } T* allocate(std::size_t n) { return static_cast( asio_handler_alloc_helpers::allocate(sizeof(T) * n, handler_)); } void deallocate(T* p, std::size_t n) { asio_handler_alloc_helpers::deallocate(p, sizeof(T) * n, handler_); } //private: Handler& handler_; }; template class hook_allocator { public: typedef void value_type; template struct rebind { typedef hook_allocator other; }; explicit hook_allocator(Handler& h) : handler_(h) { } template hook_allocator(const hook_allocator& a) : handler_(a.handler_) { } //private: Handler& handler_; }; template struct get_hook_allocator { typedef Allocator type; static type get(Handler&, const Allocator& a) { return a; } }; template struct get_hook_allocator > { typedef hook_allocator type; static type get(Handler& handler, const std::allocator&) { return type(handler); } }; } // namespace detail } // namespace asio #define ASIO_DEFINE_HANDLER_PTR(op) \ struct ptr \ { \ Handler* h; \ op* v; \ op* p; \ ~ptr() \ { \ reset(); \ } \ static op* allocate(Handler& handler) \ { \ typedef typename ::asio::associated_allocator< \ Handler>::type associated_allocator_type; \ typedef typename 
::asio::detail::get_hook_allocator< \ Handler, associated_allocator_type>::type hook_allocator_type; \ ASIO_REBIND_ALLOC(hook_allocator_type, op) a( \ ::asio::detail::get_hook_allocator< \ Handler, associated_allocator_type>::get( \ handler, ::asio::get_associated_allocator(handler))); \ return a.allocate(1); \ } \ void reset() \ { \ if (p) \ { \ p->~op(); \ p = 0; \ } \ if (v) \ { \ typedef typename ::asio::associated_allocator< \ Handler>::type associated_allocator_type; \ typedef typename ::asio::detail::get_hook_allocator< \ Handler, associated_allocator_type>::type hook_allocator_type; \ ASIO_REBIND_ALLOC(hook_allocator_type, op) a( \ ::asio::detail::get_hook_allocator< \ Handler, associated_allocator_type>::get( \ *h, ::asio::get_associated_allocator(*h))); \ a.deallocate(static_cast(v), 1); \ v = 0; \ } \ } \ } \ /**/ #define ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR(purpose, op) \ struct ptr \ { \ const Alloc* a; \ void* v; \ op* p; \ ~ptr() \ { \ reset(); \ } \ static op* allocate(const Alloc& a) \ { \ typedef typename ::asio::detail::get_recycling_allocator< \ Alloc, purpose>::type recycling_allocator_type; \ ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \ ::asio::detail::get_recycling_allocator< \ Alloc, purpose>::get(a)); \ return a1.allocate(1); \ } \ void reset() \ { \ if (p) \ { \ p->~op(); \ p = 0; \ } \ if (v) \ { \ typedef typename ::asio::detail::get_recycling_allocator< \ Alloc, purpose>::type recycling_allocator_type; \ ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \ ::asio::detail::get_recycling_allocator< \ Alloc, purpose>::get(*a)); \ a1.deallocate(static_cast(v), 1); \ v = 0; \ } \ } \ } \ /**/ #define ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(op) \ ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR( \ ::asio::detail::thread_info_base::default_tag, op ) \ /**/ #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP galera-4-26.4.25/asio/asio/detail/winrt_utils.hpp000644 000164 177776 00000005102 15107057155 022736 
0ustar00jenkinsnogroup000000 000000 // // detail/winrt_utils.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_UTILS_HPP #define ASIO_DETAIL_WINRT_UTILS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include #include #include #include #include "asio/buffer.hpp" #include "asio/error_code.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace winrt_utils { inline Platform::String^ string(const char* from) { std::wstring tmp(from, from + std::strlen(from)); return ref new Platform::String(tmp.c_str()); } inline Platform::String^ string(const std::string& from) { std::wstring tmp(from.begin(), from.end()); return ref new Platform::String(tmp.c_str()); } inline std::string string(Platform::String^ from) { std::wstring_convert> converter; return converter.to_bytes(from->Data()); } inline Platform::String^ string(unsigned short from) { return string(std::to_string(from)); } template inline Platform::String^ string(const T& from) { return string(from.to_string()); } inline int integer(Platform::String^ from) { return _wtoi(from->Data()); } template inline Windows::Networking::HostName^ host_name(const T& from) { return ref new Windows::Networking::HostName((string)(from)); } template inline Windows::Storage::Streams::IBuffer^ buffer_dup( const ConstBufferSequence& buffers) { using Microsoft::WRL::ComPtr; using asio::buffer_size; std::size_t size = buffer_size(buffers); auto b = ref new Windows::Storage::Streams::Buffer(size); ComPtr insp = reinterpret_cast(b); ComPtr 
bacc; insp.As(&bacc); byte* bytes = nullptr; bacc->Buffer(&bytes); asio::buffer_copy(asio::buffer(bytes, size), buffers); b->Length = size; return b; } } // namespace winrt_utils } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_UTILS_HPP galera-4-26.4.25/asio/asio/detail/scoped_ptr.hpp000644 000164 177776 00000002535 15107057155 022524 0ustar00jenkinsnogroup000000 000000 // // detail/scoped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCOPED_PTR_HPP #define ASIO_DETAIL_SCOPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class scoped_ptr { public: // Constructor. explicit scoped_ptr(T* p = 0) : p_(p) { } // Destructor. ~scoped_ptr() { delete p_; } // Access. T* get() { return p_; } // Access. T* operator->() { return p_; } // Dereference. T& operator*() { return *p_; } // Reset pointer. void reset(T* p = 0) { delete p_; p_ = p; } // Release ownership of the pointer. T* release() { T* tmp = p_; p_ = 0; return tmp; } private: // Disallow copying and assignment. 
scoped_ptr(const scoped_ptr&); scoped_ptr& operator=(const scoped_ptr&); T* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCOPED_PTR_HPP galera-4-26.4.25/asio/asio/detail/impl/000755 000164 177776 00000000000 15107057160 020601 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/detail/impl/descriptor_ops.ipp000644 000164 177776 00000026203 15107057155 024361 0ustar00jenkinsnogroup000000 000000 // // detail/impl/descriptor_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/descriptor_ops.hpp" #include "asio/error.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace descriptor_ops { int open(const char* path, int flags, asio::error_code& ec) { errno = 0; int result = error_wrapper(::open(path, flags), ec); if (result >= 0) ec = asio::error_code(); return result; } int close(int d, state_type& state, asio::error_code& ec) { int result = 0; if (d != -1) { errno = 0; result = error_wrapper(::close(d), ec); if (result != 0 && (ec == asio::error::would_block || ec == asio::error::try_again)) { // According to UNIX Network Programming Vol. 1, it is possible for // close() to fail with EWOULDBLOCK under certain circumstances. What // isn't clear is the state of the descriptor after this error. The one // current OS where this behaviour is seen, Windows, says that the socket // remains open. 
Therefore we'll put the descriptor back into blocking // mode and have another attempt at closing it. #if defined(__SYMBIAN32__) int flags = ::fcntl(d, F_GETFL, 0); if (flags >= 0) ::fcntl(d, F_SETFL, flags & ~O_NONBLOCK); #else // defined(__SYMBIAN32__) ioctl_arg_type arg = 0; ::ioctl(d, FIONBIO, &arg); #endif // defined(__SYMBIAN32__) state &= ~non_blocking; errno = 0; result = error_wrapper(::close(d), ec); } } if (result == 0) ec = asio::error_code(); return result; } bool set_user_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= user_set_non_blocking; else { // Clearing the user-set non-blocking mode always overrides any // internally-set non-blocking flag. Any subsequent asynchronous // operations will need to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } return true; } return false; } bool set_internal_non_blocking(int d, state_type& state, bool value, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return false; } if (!value && (state & user_set_non_blocking)) { // It does not make sense to clear the internal non-blocking flag if the // user still wants non-blocking behaviour. Return an error and let the // caller figure out whether to update the user-set non-blocking flag. 
ec = asio::error::invalid_argument; return false; } errno = 0; #if defined(__SYMBIAN32__) int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec); if (result >= 0) { errno = 0; int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK)); result = error_wrapper(::fcntl(d, F_SETFL, flag), ec); } #else // defined(__SYMBIAN32__) ioctl_arg_type arg = (value ? 1 : 0); int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec); #endif // defined(__SYMBIAN32__) if (result >= 0) { ec = asio::error_code(); if (value) state |= internal_non_blocking; else state &= ~internal_non_blocking; return true; } return false; } std::size_t sync_read(int d, state_type state, buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Read some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Check for EOF. if (bytes == 0) { ec = asio::error::eof; return 0; } // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_read(d, 0, ec) < 0) return 0; } } bool non_blocking_read(int d, buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Read some data. errno = 0; signed_size_type bytes = error_wrapper(::readv( d, bufs, static_cast(count)), ec); // Check for end of stream. if (bytes == 0) { ec = asio::error::eof; return true; } // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. 
if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. if (bytes > 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } std::size_t sync_write(int d, state_type state, const buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a stream is a no-op. if (all_empty) { ec = asio::error_code(); return 0; } // Write some data. for (;;) { // Try to complete the operation without blocking. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Check if operation succeeded. if (bytes > 0) return bytes; // Operation failed. if ((state & user_set_non_blocking) || (ec != asio::error::would_block && ec != asio::error::try_again)) return 0; // Wait for descriptor to become ready. if (descriptor_ops::poll_write(d, 0, ec) < 0) return 0; } } bool non_blocking_write(int d, const buf* bufs, std::size_t count, asio::error_code& ec, std::size_t& bytes_transferred) { for (;;) { // Write some data. errno = 0; signed_size_type bytes = error_wrapper(::writev( d, bufs, static_cast(count)), ec); // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Check if we need to run the operation again. if (ec == asio::error::would_block || ec == asio::error::try_again) return false; // Operation is complete. 
if (bytes >= 0) { ec = asio::error_code(); bytes_transferred = bytes; } else bytes_transferred = 0; return true; } } int ioctl(int d, state_type& state, long cmd, ioctl_arg_type* arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::ioctl(d, cmd, arg), ec); if (result >= 0) { ec = asio::error_code(); // When updating the non-blocking mode we always perform the ioctl syscall, // even if the flags would otherwise indicate that the descriptor is // already in the correct state. This ensures that the underlying // descriptor is put into the state that has been requested by the user. If // the ioctl syscall was successful then we need to update the flags to // match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int fcntl(int d, int cmd, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd), ec); if (result != -1) ec = asio::error_code(); return result; } int fcntl(int d, int cmd, long arg, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } errno = 0; int result = error_wrapper(::fcntl(d, cmd, arg), ec); if (result != -1) ec = asio::error_code(); return result; } int poll_read(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_error(int d, state_type state, asio::error_code& ec) { if (d == -1) { ec = asio::error::bad_descriptor; return -1; } pollfd fds; fds.fd = d; fds.events = POLLPRI | POLLERR | POLLHUP; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : -1; errno = 0; int result = error_wrapper(::poll(&fds, 1, timeout), ec); if (result == 0) ec = (state & user_set_non_blocking) ? asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } } // namespace descriptor_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP galera-4-26.4.25/asio/asio/detail/impl/strand_service.hpp000644 000164 177776 00000006376 15107057155 024345 0ustar00jenkinsnogroup000000 000000 // // detail/impl/strand_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/call_stack.hpp" #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline strand_service::strand_impl::strand_impl() : operation(&strand_service::do_complete), locked_(false) { } struct strand_service::on_dispatch_exit { io_context_impl* io_context_; strand_impl* impl_; ~on_dispatch_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) io_context_->post_immediate_completion(impl_, false); } }; template void strand_service::dispatch(strand_service::implementation_type& impl, Handler& handler) { // If we are already in the strand then the handler can run immediately. if (call_stack::contains(impl)) { fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(handler, handler); return; } // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((this->context(), *p.p, "strand", impl, 0, "dispatch")); bool dispatch_immediately = do_dispatch(impl, p.p); operation* o = p.p; p.v = p.p = 0; if (dispatch_immediately) { // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. 
on_dispatch_exit on_exit = { &io_context_, impl }; (void)on_exit; completion_handler::do_complete( &io_context_, o, asio::error_code(), 0); } } // Request the io_context to invoke the given handler and return immediately. template void strand_service::post(strand_service::implementation_type& impl, Handler& handler) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef completion_handler op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler); ASIO_HANDLER_CREATION((this->context(), *p.p, "strand", impl, 0, "post")); do_post(impl, p.p, is_continuation); p.v = p.p = 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/impl/kqueue_reactor.ipp000644 000164 177776 00000040170 15107057155 024337 0ustar00jenkinsnogroup000000 000000 // // detail/impl/kqueue_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/kqueue_reactor.hpp" #include "asio/detail/scheduler.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" #if defined(__NetBSD__) # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, \ reinterpret_cast(static_cast(udata))) #else # define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \ EV_SET(ev, ident, filt, flags, fflags, data, udata) #endif namespace asio { namespace detail { kqueue_reactor::kqueue_reactor(asio::execution_context& ctx) : execution_context_service_base(ctx), scheduler_(use_service(ctx)), mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( REACTOR_REGISTRATION, scheduler_.concurrency_hint())), kqueue_fd_(do_kqueue_create()), interrupter_(), shutdown_(false), registered_descriptors_mutex_(mutex_.enabled()) { struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code error(errno, asio::error::get_system_category()); asio::detail::throw_error(error); } } kqueue_reactor::~kqueue_reactor() { close(kqueue_fd_); } void kqueue_reactor::shutdown() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); scheduler_.abandon_operations(ops); } void 
kqueue_reactor::notify_fork( asio::execution_context::fork_event fork_ev) { if (fork_ev == asio::execution_context::fork_child) { // The kqueue descriptor is automatically closed in the child. kqueue_fd_ = -1; kqueue_fd_ = do_kqueue_create(); interrupter_.recreate(); struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(), EVFILT_READ, EV_ADD, 0, 0, &interrupter_); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue interrupter registration"); } // Re-register all descriptors with kqueue. mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { if (state->num_kevents_ > 0) { ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state); ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state); if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue re-registration"); } } } } } void kqueue_reactor::init_task() { scheduler_.init_task(); } int kqueue_reactor::register_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); ASIO_HANDLER_REACTOR_REGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 0; descriptor_data->shutdown_ = false; return 0; } int kqueue_reactor::register_internal_descriptor( int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); ASIO_HANDLER_REACTOR_REGISTRATION(( context(), 
static_cast(descriptor), reinterpret_cast(descriptor_data))); mutex::scoped_lock lock(descriptor_data->mutex_); descriptor_data->descriptor_ = descriptor; descriptor_data->num_kevents_ = 1; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); struct kevent events[1]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1) return errno; return 0; } void kqueue_reactor::move_descriptor(socket_type, kqueue_reactor::per_descriptor_data& target_descriptor_data, kqueue_reactor::per_descriptor_data& source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void kqueue_reactor::start_op(int op_type, socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { static const int num_kevents[max_ops] = { 1, 2, 1 }; if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (op->perform()) { descriptor_lock.unlock(); scheduler_.post_immediate_completion(op, is_continuation); return; } if (descriptor_data->num_kevents_ < num_kevents[op_type]) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1) { descriptor_data->num_kevents_ = num_kevents[op_type]; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); 
scheduler_.post_immediate_completion(op, is_continuation); return; } } } else { if (descriptor_data->num_kevents_ < num_kevents[op_type]) descriptor_data->num_kevents_ = num_kevents[op_type]; struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, descriptor_data); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } } descriptor_data->op_queue_[op_type].push(op); scheduler_.work_started(); } void kqueue_reactor::cancel_ops(socket_type, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); scheduler_.post_deferred_completions(ops); } void kqueue_reactor::deregister_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the kqueue when it // is closed. 
} else { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); ASIO_HANDLER_REACTOR_DEREGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); scheduler_.post_deferred_completions(ops); // Leave descriptor_data set so that it will be freed by the subsequent // call to cleanup_descriptor_data. } else { // We are shutting down, so prevent cleanup_descriptor_data from freeing // the descriptor_data object and let the destructor free it instead. descriptor_data = 0; } } void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor, kqueue_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { struct kevent events[2]; ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ, EV_DELETE, 0, 0, 0); ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); ASIO_HANDLER_REACTOR_DEREGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); // Leave descriptor_data set so that it will be freed by the subsequent // call to cleanup_descriptor_data. 
} else { // We are shutting down, so prevent cleanup_descriptor_data from freeing // the descriptor_data object and let the destructor free it instead. descriptor_data = 0; } } void kqueue_reactor::cleanup_descriptor_data( per_descriptor_data& descriptor_data) { if (descriptor_data) { free_descriptor_state(descriptor_data); descriptor_data = 0; } } void kqueue_reactor::run(long usec, op_queue& ops) { mutex::scoped_lock lock(mutex_); // Determine how long to block while waiting for events. timespec timeout_buf = { 0, 0 }; timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf; lock.unlock(); // Block on the kqueue descriptor. struct kevent events[128]; int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout); #if defined(ASIO_ENABLE_HANDLER_TRACKING) // Trace the waiting events. for (int i = 0; i < num_events; ++i) { void* ptr = reinterpret_cast(events[i].udata); if (ptr != &interrupter_) { unsigned event_mask = 0; switch (events[i].filter) { case EVFILT_READ: event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT; break; case EVFILT_WRITE: event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT; break; } if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0) event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT; ASIO_HANDLER_REACTOR_EVENTS((context(), reinterpret_cast(ptr), event_mask)); } } #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) // Dispatch the waiting events. for (int i = 0; i < num_events; ++i) { void* ptr = reinterpret_cast(events[i].udata); if (ptr == &interrupter_) { interrupter_.reset(); } else { descriptor_state* descriptor_data = static_cast(ptr); mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (events[i].filter == EVFILT_WRITE && descriptor_data->num_kevents_ == 2 && descriptor_data->op_queue_[write_op].empty()) { // Some descriptor types, like serial ports, don't seem to support // EV_CLEAR with EVFILT_WRITE. 
Since we have no pending write // operations we'll remove the EVFILT_WRITE registration here so that // we don't end up in a tight spin. struct kevent delete_events[1]; ASIO_KQUEUE_EV_SET(&delete_events[0], descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0); ::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0); descriptor_data->num_kevents_ = 1; } // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. #if defined(__NetBSD__) static const unsigned int filter[max_ops] = #else static const int filter[max_ops] = #endif { EVFILT_READ, EVFILT_WRITE, EVFILT_READ }; for (int j = max_ops - 1; j >= 0; --j) { if (events[i].filter == filter[j]) { if (j != except_op || events[i].flags & EV_OOBAND) { while (reactor_op* op = descriptor_data->op_queue_[j].front()) { if (events[i].flags & EV_ERROR) { op->ec_ = asio::error_code( static_cast(events[i].data), asio::error::get_system_category()); descriptor_data->op_queue_[j].pop(); ops.push(op); } if (op->perform()) { descriptor_data->op_queue_[j].pop(); ops.push(op); } else break; } } } } } } lock.lock(); timer_queues_.get_ready_timers(ops); } void kqueue_reactor::interrupt() { interrupter_.interrupt(); } int kqueue_reactor::do_kqueue_create() { int fd = ::kqueue(); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "kqueue"); } return fd; } kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state() { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING( REACTOR_IO, scheduler_.concurrency_hint())); } void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); 
timer_queues_.insert(&queue); } void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timespec* kqueue_reactor::get_timeout(long usec, timespec& ts) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. const long max_usec = 5 * 60 * 1000 * 1000; usec = timer_queues_.wait_duration_usec( (usec < 0 || max_usec < usec) ? max_usec : usec); ts.tv_sec = usec / 1000000; ts.tv_nsec = (usec % 1000000) * 1000; return &ts; } } // namespace detail } // namespace asio #undef ASIO_KQUEUE_EV_SET #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP galera-4-26.4.25/asio/asio/detail/impl/eventfd_select_interrupter.ipp000644 000164 177776 00000010601 15107057155 026752 0ustar00jenkinsnogroup000000 000000 // // detail/impl/eventfd_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include #include #include #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # include #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 #include "asio/detail/cstdint.hpp" #include "asio/detail/eventfd_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { eventfd_select_interrupter::eventfd_select_interrupter() { open_descriptors(); } void eventfd_select_interrupter::open_descriptors() { #if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } #else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 # if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) write_descriptor_ = read_descriptor_ = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); # else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) errno = EINVAL; write_descriptor_ = read_descriptor_ = -1; # endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK) if (read_descriptor_ == -1 && errno == EINVAL) { write_descriptor_ = read_descriptor_ = ::eventfd(0, 0); if (read_descriptor_ != -1) { ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); } } #endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 if (read_descriptor_ == -1) { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC); 
write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "eventfd_select_interrupter"); } } } eventfd_select_interrupter::~eventfd_select_interrupter() { close_descriptors(); } void eventfd_select_interrupter::close_descriptors() { if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_) ::close(write_descriptor_); if (read_descriptor_ != -1) ::close(read_descriptor_); } void eventfd_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void eventfd_select_interrupter::interrupt() { uint64_t counter(1UL); int result = ::write(write_descriptor_, &counter, sizeof(uint64_t)); (void)result; } bool eventfd_select_interrupter::reset() { if (write_descriptor_ == read_descriptor_) { for (;;) { // Only perform one read. The kernel maintains an atomic counter. uint64_t counter(0); errno = 0; int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); return was_interrupted; } } else { for (;;) { // Clear all data from the pipe. 
char data[1024]; int bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP galera-4-26.4.25/asio/asio/detail/impl/posix_event.ipp000644 000164 177776 00000003160 15107057155 023662 0ustar00jenkinsnogroup000000 000000 // // detail/impl/posix_event.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_event.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_event::posix_event() : state_(0) { #if (defined(__MACH__) && defined(__APPLE__)) \ || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) int error = ::pthread_cond_init(&cond_, 0); #else // (defined(__MACH__) && defined(__APPLE__)) // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) ::pthread_condattr_t attr; ::pthread_condattr_init(&attr); int error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); if (error == 0) error = ::pthread_cond_init(&cond_, &attr); #endif // (defined(__MACH__) && defined(__APPLE__)) // || (defined(__ANDROID__) && (__ANDROID_API__ < 21)) asio::error_code ec(error, asio::error::get_system_category()); 
asio::detail::throw_error(ec, "event"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP galera-4-26.4.25/asio/asio/detail/impl/win_iocp_socket_service_base.ipp000644 000164 177776 00000057507 15107057155 027226 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_iocp_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_socket_service_base::win_iocp_socket_service_base( execution_context& context) : context_(context), iocp_service_(use_service(context)), reactor_(0), connect_ex_(0), nt_set_info_(0), mutex_(), impl_list_(0) { } void win_iocp_socket_service_base::base_shutdown() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); base_implementation_type* impl = impl_list_; while (impl) { close_for_destruction(*impl); impl = impl->next_; } } void win_iocp_socket_service_base::construct( win_iocp_socket_service_base::base_implementation_type& impl) { impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) // Insert implementation into linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_socket_service_base::base_move_construct( win_iocp_socket_service_base::base_implementation_type& impl, win_iocp_socket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; impl.cancel_token_ = other_impl.cancel_token_; other_impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_socket_service_base::base_move_assign( win_iocp_socket_service_base::base_implementation_type& impl, win_iocp_socket_service_base& other_service, win_iocp_socket_service_base::base_implementation_type& other_impl) { close_for_destruction(impl); if (this != &other_service) { // Remove implementation from linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; impl.cancel_token_ = other_impl.cancel_token_; other_impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void win_iocp_socket_service_base::destroy( win_iocp_socket_service_base::base_implementation_type& impl) { close_for_destruction(impl); // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code win_iocp_socket_service_base::close( win_iocp_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((iocp_service_.context(), "socket", &impl, impl.socket_, "close")); // Check if the reactor was created, in which case we need to close the // socket on the reactor as well to cancel any operations that might be // running there. 
select_reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); socket_ops::close(impl.socket_, impl.state_, false, ec); if (r) r->cleanup_descriptor_data(impl.reactor_data_); } else { ec = asio::error_code(); } impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) return ec; } socket_type win_iocp_socket_service_base::release( win_iocp_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) return invalid_socket; cancel(impl, ec); if (ec) return invalid_socket; nt_set_info_fn fn = get_nt_set_info(); if (fn == 0) { ec = asio::error::operation_not_supported; return invalid_socket; } HANDLE sock_as_handle = reinterpret_cast(impl.socket_); ULONG_PTR iosb[2] = { 0, 0 }; void* info[2] = { 0, 0 }; if (fn(sock_as_handle, iosb, &info, sizeof(info), 61 /* FileReplaceCompletionInformation */)) { ec = asio::error::operation_not_supported; return invalid_socket; } socket_type tmp = impl.socket_; impl.socket_ = invalid_socket; return tmp; } asio::error_code win_iocp_socket_service_base::cancel( win_iocp_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION((iocp_service_.context(), "socket", &impl, impl.socket_, "cancel")); if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) { // The version of Windows supports cancellation from any thread. 
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); cancel_io_ex_t cancel_io_ex = reinterpret_cast( reinterpret_cast(cancel_io_ex_ptr)); socket_type sock = impl.socket_; HANDLE sock_as_handle = reinterpret_cast(sock); if (!cancel_io_ex(sock_as_handle, 0)) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_NOT_FOUND) { // ERROR_NOT_FOUND means that there were no operations to be // cancelled. We swallow this error to match the behaviour on other // platforms. ec = asio::error_code(); } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } } else { ec = asio::error_code(); } } #if defined(ASIO_ENABLE_CANCELIO) else if (impl.safe_cancellation_thread_id_ == 0) { // No operations have been started, so there's nothing to cancel. ec = asio::error_code(); } else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) { // Asynchronous operations have been started from the current thread only, // so it is safe to try to cancel them using CancelIo. socket_type sock = impl.socket_; HANDLE sock_as_handle = reinterpret_cast(sock); if (!::CancelIo(sock_as_handle)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } } else { // Asynchronous operations have been started from more than one thread, // so cancellation is not safe. ec = asio::error::operation_not_supported; } #else // defined(ASIO_ENABLE_CANCELIO) else { // Cancellation is not supported as CancelIo may not be used. ec = asio::error::operation_not_supported; } #endif // defined(ASIO_ENABLE_CANCELIO) // Cancel any operations started via the reactor. 
if (!ec) { select_reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->cancel_ops(impl.socket_, impl.reactor_data_); } return ec; } asio::error_code win_iocp_socket_service_base::do_open( win_iocp_socket_service_base::base_implementation_type& impl, int family, int type, int protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } socket_holder sock(socket_ops::socket(family, type, protocol, ec)); if (sock.get() == invalid_socket) return ec; HANDLE sock_as_handle = reinterpret_cast(sock.get()); if (iocp_service_.register_handle(sock_as_handle, ec)) return ec; impl.socket_ = sock.release(); switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter()); ec = asio::error_code(); return ec; } asio::error_code win_iocp_socket_service_base::do_assign( win_iocp_socket_service_base::base_implementation_type& impl, int type, socket_type native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } HANDLE sock_as_handle = reinterpret_cast(native_socket); if (iocp_service_.register_handle(sock_as_handle, ec)) return ec; impl.socket_ = native_socket; switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.cancel_token_.reset(static_cast(0), socket_ops::noop_deleter()); ec = asio::error_code(); return ec; } void win_iocp_socket_service_base::start_send_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (noop) 
iocp_service_.on_completion(op); else if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; int result = ::WSASend(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, flags, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_send_to_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, const socket_addr_type* addr, int addrlen, socket_base::message_flags flags, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; int result = ::WSASendTo(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, flags, addr, addrlen, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_receive_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (noop) iocp_service_.on_completion(op); else if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = ::WSARecv(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, &recv_flags, op, 0); DWORD last_error = ::WSAGetLastError(); if 
(last_error == ERROR_NETNAME_DELETED) last_error = WSAECONNRESET; else if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_null_buffers_receive_op( win_iocp_socket_service_base::base_implementation_type& impl, socket_base::message_flags flags, reactor_op* op) { if ((impl.state_ & socket_ops::stream_oriented) != 0) { // For stream sockets on Windows, we may issue a 0-byte overlapped // WSARecv to wait until there is data available on the socket. ::WSABUF buf = { 0, 0 }; start_receive_op(impl, &buf, 1, flags, false, op); } else { start_reactor_op(impl, (flags & socket_base::message_out_of_band) ? select_reactor::except_op : select_reactor::read_op, op); } } void win_iocp_socket_service_base::start_receive_from_op( win_iocp_socket_service_base::base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, socket_base::message_flags flags, int* addrlen, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else { DWORD bytes_transferred = 0; DWORD recv_flags = flags; int result = ::WSARecvFrom(impl.socket_, buffers, static_cast(buffer_count), &bytes_transferred, &recv_flags, addr, addrlen, op, 0); DWORD last_error = ::WSAGetLastError(); if (last_error == ERROR_PORT_UNREACHABLE) last_error = WSAECONNREFUSED; if (result != 0 && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error, bytes_transferred); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_accept_op( win_iocp_socket_service_base::base_implementation_type& impl, bool peer_is_open, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op) { 
update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) iocp_service_.on_completion(op, asio::error::bad_descriptor); else if (peer_is_open) iocp_service_.on_completion(op, asio::error::already_open); else { asio::error_code ec; new_socket.reset(socket_ops::socket(family, type, protocol, ec)); if (new_socket.get() == invalid_socket) iocp_service_.on_completion(op, ec); else { DWORD bytes_read = 0; BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer, 0, address_length, address_length, &bytes_read, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); } } } void win_iocp_socket_service_base::restart_accept_op( socket_type s, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op) { new_socket.reset(); iocp_service_.work_started(); asio::error_code ec; new_socket.reset(socket_ops::socket(family, type, protocol, ec)); if (new_socket.get() == invalid_socket) iocp_service_.on_completion(op, ec); else { DWORD bytes_read = 0; BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer, 0, address_length, address_length, &bytes_read, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); } } void win_iocp_socket_service_base::start_reactor_op( win_iocp_socket_service_base::base_implementation_type& impl, int op_type, reactor_op* op) { select_reactor& r = get_reactor(); update_cancellation_thread_id(impl); if (is_open(impl)) { r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false); return; } else op->ec_ = asio::error::bad_descriptor; iocp_service_.post_immediate_completion(op, false); } void win_iocp_socket_service_base::start_connect_op( win_iocp_socket_service_base::base_implementation_type& impl, int family, int 
type, const socket_addr_type* addr, std::size_t addrlen, win_iocp_socket_connect_op_base* op) { // If ConnectEx is available, use that. if (family == ASIO_OS_DEF(AF_INET) || family == ASIO_OS_DEF(AF_INET6)) { if (connect_ex_fn connect_ex = get_connect_ex(impl, type)) { union address_union { socket_addr_type base; sockaddr_in4_type v4; sockaddr_in6_type v6; } a; using namespace std; // For memset. memset(&a, 0, sizeof(a)); a.base.sa_family = family; socket_ops::bind(impl.socket_, &a.base, family == ASIO_OS_DEF(AF_INET) ? sizeof(a.v4) : sizeof(a.v6), op->ec_); if (op->ec_ && op->ec_ != asio::error::invalid_argument) { iocp_service_.post_immediate_completion(op, false); return; } op->connect_ex_ = true; update_cancellation_thread_id(impl); iocp_service_.work_started(); BOOL result = connect_ex(impl.socket_, addr, static_cast(addrlen), 0, 0, 0, op); DWORD last_error = ::WSAGetLastError(); if (!result && last_error != WSA_IO_PENDING) iocp_service_.on_completion(op, last_error); else iocp_service_.on_pending(op); return; } } // Otherwise, fall back to a reactor-based implementation. 
select_reactor& r = get_reactor(); update_cancellation_thread_id(impl); if ((impl.state_ & socket_ops::non_blocking) != 0 || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); r.start_op(select_reactor::connect_op, impl.socket_, impl.reactor_data_, op, false, false); return; } } } r.post_immediate_completion(op, false); } void win_iocp_socket_service_base::close_for_destruction( win_iocp_socket_service_base::base_implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((iocp_service_.context(), "socket", &impl, impl.socket_, "close")); // Check if the reactor was created, in which case we need to close the // socket on the reactor as well to cancel any operations that might be // running there. select_reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (r) r->deregister_descriptor(impl.socket_, impl.reactor_data_, true); asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); if (r) r->cleanup_descriptor_data(impl.reactor_data_); } impl.socket_ = invalid_socket; impl.state_ = 0; impl.cancel_token_.reset(); #if defined(ASIO_ENABLE_CANCELIO) impl.safe_cancellation_thread_id_ = 0; #endif // defined(ASIO_ENABLE_CANCELIO) } void win_iocp_socket_service_base::update_cancellation_thread_id( win_iocp_socket_service_base::base_implementation_type& impl) { #if defined(ASIO_ENABLE_CANCELIO) if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); #else // defined(ASIO_ENABLE_CANCELIO) (void)impl; #endif // defined(ASIO_ENABLE_CANCELIO) } select_reactor& win_iocp_socket_service_base::get_reactor() { 
select_reactor* r = static_cast( interlocked_compare_exchange_pointer( reinterpret_cast(&reactor_), 0, 0)); if (!r) { r = &(use_service(context_)); interlocked_exchange_pointer(reinterpret_cast(&reactor_), r); } return *r; } win_iocp_socket_service_base::connect_ex_fn win_iocp_socket_service_base::get_connect_ex( win_iocp_socket_service_base::base_implementation_type& impl, int type) { #if defined(ASIO_DISABLE_CONNECTEX) (void)impl; (void)type; return 0; #else // defined(ASIO_DISABLE_CONNECTEX) if (type != ASIO_OS_DEF(SOCK_STREAM) && type != ASIO_OS_DEF(SOCK_SEQPACKET)) return 0; void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0); if (!ptr) { GUID guid = { 0x25a207b9, 0xddf3, 0x4660, { 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } }; DWORD bytes = 0; if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0) { // Set connect_ex_ to a special value to indicate that ConnectEx is // unavailable. That way we won't bother trying to look it up again. ptr = this; } interlocked_exchange_pointer(&connect_ex_, ptr); } return reinterpret_cast(ptr == this ? 0 : ptr); #endif // defined(ASIO_DISABLE_CONNECTEX) } win_iocp_socket_service_base::nt_set_info_fn win_iocp_socket_service_base::get_nt_set_info() { void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0); if (!ptr) { if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL")) ptr = reinterpret_cast(GetProcAddress(h, "NtSetInformationFile")); // On failure, set nt_set_info_ to a special value to indicate that the // NtSetInformationFile function is unavailable. That way we won't bother // trying to look it up again. interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this); } return reinterpret_cast(ptr == this ? 
0 : ptr); } void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp) { #if defined(_M_IX86) return reinterpret_cast(InterlockedCompareExchange( reinterpret_cast(dest), reinterpret_cast(exch), reinterpret_cast(cmp))); #else return InterlockedCompareExchangePointer(dest, exch, cmp); #endif } void* win_iocp_socket_service_base::interlocked_exchange_pointer( void** dest, void* val) { #if defined(_M_IX86) return reinterpret_cast(InterlockedExchange( reinterpret_cast(dest), reinterpret_cast(val))); #else return InterlockedExchangePointer(dest, val); #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP galera-4-26.4.25/asio/asio/detail/impl/buffer_sequence_adapter.ipp000644 000164 177776 00000005637 15107057155 026173 0ustar00jenkinsnogroup000000 000000 // // detail/impl/buffer_sequence_adapter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include #include #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_buffer_impl : public Microsoft::WRL::RuntimeClass< Microsoft::WRL::RuntimeClassFlags< Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>, ABI::Windows::Storage::Streams::IBuffer, Windows::Storage::Streams::IBufferByteAccess> { public: explicit winrt_buffer_impl(const asio::const_buffer& b) { bytes_ = const_cast(static_cast(b.data())); length_ = b.size(); capacity_ = b.size(); } explicit winrt_buffer_impl(const asio::mutable_buffer& b) { bytes_ = static_cast(b.data()); length_ = 0; capacity_ = b.size(); } ~winrt_buffer_impl() { } STDMETHODIMP Buffer(byte** value) { *value = bytes_; return S_OK; } STDMETHODIMP get_Capacity(UINT32* value) { *value = capacity_; return S_OK; } STDMETHODIMP get_Length(UINT32 *value) { *value = length_; return S_OK; } STDMETHODIMP put_Length(UINT32 value) { if (value > capacity_) return E_INVALIDARG; length_ = value; return S_OK; } private: byte* bytes_; UINT32 length_; UINT32 capacity_; }; void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::mutable_buffer& buffer) { std::memset(&buf, 0, sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = Microsoft::WRL::Make(buffer); buf = reinterpret_cast(insp.Get()); } void buffer_sequence_adapter_base::init_native_buffer( buffer_sequence_adapter_base::native_buffer_type& buf, const asio::const_buffer& buffer) { std::memset(&buf, 0, sizeof(native_buffer_type)); Microsoft::WRL::ComPtr insp = 
Microsoft::WRL::Make(buffer); Platform::Object^ buf_obj = reinterpret_cast(insp.Get()); buf = reinterpret_cast(insp.Get()); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP galera-4-26.4.25/asio/asio/detail/impl/socket_select_interrupter.ipp000644 000164 177776 00000012607 15107057155 026617 0ustar00jenkinsnogroup000000 000000 // // detail/impl/socket_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_select_interrupter.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { socket_select_interrupter::socket_select_interrupter() { open_descriptors(); } void socket_select_interrupter::open_descriptors() { asio::error_code ec; socket_holder acceptor(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (acceptor.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); int opt = 1; socket_ops::state_type acceptor_state = 0; socket_ops::setsockopt(acceptor.get(), acceptor_state, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec); using namespace std; // For memset. 
sockaddr_in4_type addr; std::size_t addr_len = sizeof(addr); memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); addr.sin_port = 0; if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr, &addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); // Some broken firewalls on Windows will intermittently cause getsockname to // return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We // explicitly specify the target address here to work around this problem. if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY)) addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK); if (socket_ops::listen(acceptor.get(), SOMAXCONN, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder client(socket_ops::socket( AF_INET, SOCK_STREAM, IPPROTO_TCP, ec)); if (client.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr, addr_len, ec) == socket_error_retval) asio::detail::throw_error(ec, "socket_select_interrupter"); socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec)); if (server.get() == invalid_socket) asio::detail::throw_error(ec, "socket_select_interrupter"); ioctl_arg_type non_blocking = 1; socket_ops::state_type client_state = 0; if (socket_ops::ioctl(client.get(), client_state, FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(client.get(), client_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); non_blocking = 1; socket_ops::state_type server_state = 0; if (socket_ops::ioctl(server.get(), server_state, 
FIONBIO, &non_blocking, ec)) asio::detail::throw_error(ec, "socket_select_interrupter"); opt = 1; socket_ops::setsockopt(server.get(), server_state, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec); read_descriptor_ = server.release(); write_descriptor_ = client.release(); } socket_select_interrupter::~socket_select_interrupter() { close_descriptors(); } void socket_select_interrupter::close_descriptors() { asio::error_code ec; socket_ops::state_type state = socket_ops::internal_non_blocking; if (read_descriptor_ != invalid_socket) socket_ops::close(read_descriptor_, state, true, ec); if (write_descriptor_ != invalid_socket) socket_ops::close(write_descriptor_, state, true, ec); } void socket_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = invalid_socket; read_descriptor_ = invalid_socket; open_descriptors(); } void socket_select_interrupter::interrupt() { char byte = 0; socket_ops::buf b; socket_ops::init_buf(b, &byte, 1); asio::error_code ec; socket_ops::send(write_descriptor_, &b, 1, 0, ec); } bool socket_select_interrupter::reset() { char data[1024]; socket_ops::buf b; socket_ops::init_buf(b, data, sizeof(data)); asio::error_code ec; int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec); return was_interrupted; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP galera-4-26.4.25/asio/asio/detail/impl/winrt_timer_scheduler.hpp000644 000164 177776 00000004774 15107057155 025733 0ustar00jenkinsnogroup000000 000000 // // detail/impl/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void winrt_timer_scheduler::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. template void winrt_timer_scheduler::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void winrt_timer_scheduler::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { scheduler_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); scheduler_.work_started(); if (earliest) event_.signal(lock); } template std::size_t winrt_timer_scheduler::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); scheduler_.post_deferred_completions(ops); return n; } template void winrt_timer_scheduler::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& to, typename timer_queue::per_timer_data& from) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; queue.cancel_timer(to, ops); queue.move_timer(to, from); lock.unlock(); scheduler_.post_deferred_completions(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // 
defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP galera-4-26.4.25/asio/asio/detail/impl/handler_tracking.ipp000644 000164 177776 00000023506 15107057155 024624 0ustar00jenkinsnogroup000000 000000 // // detail/impl/handler_tracking.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_CUSTOM_HANDLER_TRACKING) // The handler tracking implementation is provided by the user-specified header. #elif defined(ASIO_ENABLE_HANDLER_TRACKING) #include #include #include "asio/detail/handler_tracking.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/time_traits.hpp" #elif defined(ASIO_HAS_CHRONO) # include "asio/detail/chrono.hpp" # include "asio/detail/chrono_time_traits.hpp" # include "asio/wait_traits.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/socket_types.hpp" #elif !defined(ASIO_WINDOWS) # include #endif // !defined(ASIO_WINDOWS) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct handler_tracking_timestamp { uint64_t seconds; uint64_t microseconds; handler_tracking_timestamp() { #if defined(ASIO_HAS_BOOST_DATE_TIME) boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); boost::posix_time::time_duration now = boost::posix_time::microsec_clock::universal_time() - epoch; #elif defined(ASIO_HAS_CHRONO) typedef chrono_time_traits > traits_helper; traits_helper::posix_time_duration now( chrono::system_clock::now().time_since_epoch()); #endif seconds = 
static_cast(now.total_seconds()); microseconds = static_cast(now.total_microseconds() % 1000000); } }; struct handler_tracking::tracking_state { static_mutex mutex_; uint64_t next_id_; tss_ptr* current_completion_; }; handler_tracking::tracking_state* handler_tracking::get_state() { static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0 }; return &state; } void handler_tracking::init() { static tracking_state* state = get_state(); state->mutex_.init(); static_mutex::scoped_lock lock(state->mutex_); if (state->current_completion_ == 0) state->current_completion_ = new tss_ptr; } void handler_tracking::creation(execution_context&, handler_tracking::tracked_handler& h, const char* object_type, void* object, uintmax_t /*native_handle*/, const char* op_name) { static tracking_state* state = get_state(); static_mutex::scoped_lock lock(state->mutex_); h.id_ = state->next_id_++; lock.unlock(); handler_tracking_timestamp timestamp; uint64_t current_id = 0; if (completion* current_completion = *state->current_completion_) current_id = current_completion->id_; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, current_id, h.id_, object_type, object, op_name); } handler_tracking::completion::completion( const handler_tracking::tracked_handler& h) : id_(h.id_), invoked_(false), next_(*get_state()->current_completion_) { *get_state()->current_completion_ = this; } handler_tracking::completion::~completion() { if (id_) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%c%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%c%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, invoked_ ? '!' 
: '~', id_); } *get_state()->current_completion_ = next_; } void handler_tracking::completion::invocation_begin() { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value()); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, std::size_t bytes_transferred) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), static_cast(bytes_transferred)); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, int signal_number) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), signal_number); invoked_ = true; } void handler_tracking::completion::invocation_begin( const asio::error_code& ec, const char* arg) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) 
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_, ec.category().name(), ec.value(), arg); invoked_ = true; } void handler_tracking::completion::invocation_end() { if (id_) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|<%I64u|\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|<%llu|\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, id_); id_ = 0; } } void handler_tracking::operation(execution_context&, const char* object_type, void* object, uintmax_t /*native_handle*/, const char* op_name) { static tracking_state* state = get_state(); handler_tracking_timestamp timestamp; unsigned long long current_id = 0; if (completion* current_completion = *state->current_completion_) current_id = current_completion->id_; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, current_id, object_type, object, op_name); } void handler_tracking::reactor_registration(execution_context& /*context*/, uintmax_t /*native_handle*/, uintmax_t /*registration*/) { } void handler_tracking::reactor_deregistration(execution_context& /*context*/, uintmax_t /*native_handle*/, uintmax_t /*registration*/) { } void handler_tracking::reactor_events(execution_context& /*context*/, uintmax_t /*native_handle*/, unsigned /*events*/) { } void handler_tracking::reactor_operation( const tracked_handler& h, const char* op_name, const asio::error_code& ec) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n", #endif // defined(ASIO_WINDOWS) 
timestamp.seconds, timestamp.microseconds, h.id_, op_name, ec.category().name(), ec.value()); } void handler_tracking::reactor_operation( const tracked_handler& h, const char* op_name, const asio::error_code& ec, std::size_t bytes_transferred) { handler_tracking_timestamp timestamp; write_line( #if defined(ASIO_WINDOWS) "@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n", #else // defined(ASIO_WINDOWS) "@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n", #endif // defined(ASIO_WINDOWS) timestamp.seconds, timestamp.microseconds, h.id_, op_name, ec.category().name(), ec.value(), static_cast(bytes_transferred)); } void handler_tracking::write_line(const char* format, ...) { using namespace std; // For sprintf (or equivalent). va_list args; va_start(args, format); char line[256] = ""; #if defined(ASIO_HAS_SECURE_RTL) int length = vsprintf_s(line, sizeof(line), format, args); #else // defined(ASIO_HAS_SECURE_RTL) int length = vsprintf(line, format, args); #endif // defined(ASIO_HAS_SECURE_RTL) va_end(args); #if defined(ASIO_WINDOWS_RUNTIME) wchar_t wline[256] = L""; mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length); ::OutputDebugStringW(wline); #elif defined(ASIO_WINDOWS) HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE); DWORD bytes_written = 0; ::WriteFile(stderr_handle, line, length, &bytes_written, 0); #else // defined(ASIO_WINDOWS) ::write(STDERR_FILENO, line, length); #endif // defined(ASIO_WINDOWS) } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP galera-4-26.4.25/asio/asio/detail/impl/winrt_ssocket_service_base.ipp000644 000164 177776 00000040331 15107057155 026730 0ustar00jenkinsnogroup000000 000000 // // detail/impl/winrt_ssocket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_ssocket_service_base::winrt_ssocket_service_base( execution_context& context) : scheduler_(use_service(context)), async_manager_(use_service(context)), mutex_(), impl_list_(0) { } void winrt_ssocket_service_base::base_shutdown() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); base_implementation_type* impl = impl_list_; while (impl) { asio::error_code ignored_ec; close(*impl, ignored_ec); impl = impl->next_; } } void winrt_ssocket_service_base::construct( winrt_ssocket_service_base::base_implementation_type& impl) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_construct( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; // Insert implementation into linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void winrt_ssocket_service_base::base_move_assign( winrt_ssocket_service_base::base_implementation_type& impl, winrt_ssocket_service_base& other_service, winrt_ssocket_service_base::base_implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.socket_ = other_impl.socket_; other_impl.socket_ = nullptr; if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void winrt_ssocket_service_base::destroy( winrt_ssocket_service_base::base_implementation_type& impl) { asio::error_code ignored_ec; close(impl, ignored_ec); // Remove implementation from linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code winrt_ssocket_service_base::close( winrt_ssocket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (impl.socket_) { delete impl.socket_; impl.socket_ = nullptr; } ec = asio::error_code(); return ec; } winrt_ssocket_service_base::native_handle_type winrt_ssocket_service_base::release( winrt_ssocket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) return nullptr; cancel(impl, ec); if (ec) return nullptr; native_handle_type tmp = impl.socket_; impl.socket_ = nullptr; return tmp; } std::size_t winrt_ssocket_service_base::do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return addr_len; } try { std::string addr_string = winrt_utils::string(local ? impl.socket_->Information->LocalAddress->CanonicalName : impl.socket_->Information->RemoteAddress->CanonicalName); unsigned short port = winrt_utils::integer(local ? 
impl.socket_->Information->LocalPort : impl.socket_->Information->RemotePort); unsigned long scope = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): if (addr_len < sizeof(sockaddr_in4_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(), &reinterpret_cast(addr)->sin_addr, &scope, ec); reinterpret_cast(addr)->sin_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in4_type); } case ASIO_OS_DEF(AF_INET6): if (addr_len < sizeof(sockaddr_in6_type)) { ec = asio::error::invalid_argument; return addr_len; } else { socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(), &reinterpret_cast(addr)->sin6_addr, &scope, ec); reinterpret_cast(addr)->sin6_port = socket_ops::host_to_network_short(port); ec = asio::error_code(); return sizeof(sockaddr_in6_type); } default: ec = asio::error::address_family_not_supported; return addr_len; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return addr_len; } } asio::error_code winrt_ssocket_service_base::do_set_option( winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->KeepAlive = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (optlen == sizeof(int)) { int value = 0; std::memcpy(&value, optval, optlen); impl.socket_->Control->NoDelay = !!value; ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } 
} catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::do_get_option( const winrt_ssocket_service_base::base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return; } try { if (level == ASIO_OS_DEF(SOL_SOCKET) && optname == ASIO_OS_DEF(SO_KEEPALIVE)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->KeepAlive ? 1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else if (level == ASIO_OS_DEF(IPPROTO_TCP) && optname == ASIO_OS_DEF(TCP_NODELAY)) { if (*optlen >= sizeof(int)) { int value = impl.socket_->Control->NoDelay ? 1 : 0; std::memcpy(optval, &value, sizeof(int)); *optlen = sizeof(int); ec = asio::error_code(); } else { ec = asio::error::invalid_argument; } } else { ec = asio::error::invalid_argument; } } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } } asio::error_code winrt_ssocket_service_base::do_connect( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } char addr_string[max_addr_v6_str_len]; unsigned short port; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, ec); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: ec = asio::error::address_family_not_supported; 
return ec; } if (!ec) try { async_manager_.sync(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } void winrt_ssocket_service_base::start_connect_op( winrt_ssocket_service_base::base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation) { if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; scheduler_.post_immediate_completion(op, is_continuation); return; } char addr_string[max_addr_v6_str_len]; unsigned short port = 0; switch (reinterpret_cast(addr)->sa_family) { case ASIO_OS_DEF(AF_INET): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET), &reinterpret_cast(addr)->sin_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin_port); break; case ASIO_OS_DEF(AF_INET6): socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6), &reinterpret_cast(addr)->sin6_addr, addr_string, sizeof(addr_string), 0, op->ec_); port = socket_ops::network_to_host_short( reinterpret_cast(addr)->sin6_port); break; default: op->ec_ = asio::error::address_family_not_supported; break; } if (op->ec_) { scheduler_.post_immediate_completion(op, is_continuation); return; } try { async_manager_.async(impl.socket_->ConnectAsync( ref new Windows::Networking::HostName( winrt_utils::string(addr_string)), winrt_utils::string(port)), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code( e->HResult, asio::system_category()); scheduler_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_send( winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } 
try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } return async_manager_.sync( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_send_op( winrt_ssocket_service_base::base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; scheduler_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; scheduler_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { scheduler_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); scheduler_.post_immediate_completion(op, is_continuation); } } std::size_t winrt_ssocket_service_base::do_receive( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec) { if (flags) { ec = asio::error::operation_not_supported; return 0; } if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { ec = asio::error_code(); return 0; } async_manager_.sync( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), ec); std::size_t bytes_transferred = bufs.buffers()[0]->Length; if (bytes_transferred == 0 && !ec) { ec = asio::error::eof; } return bytes_transferred; } catch (Platform::Exception^ e) 
{ ec = asio::error_code(e->HResult, asio::system_category()); return 0; } } void winrt_ssocket_service_base::start_receive_op( winrt_ssocket_service_base::base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation) { if (flags) { op->ec_ = asio::error::operation_not_supported; scheduler_.post_immediate_completion(op, is_continuation); return; } if (!is_open(impl)) { op->ec_ = asio::error::bad_descriptor; scheduler_.post_immediate_completion(op, is_continuation); return; } try { buffer_sequence_adapter bufs(asio::buffer(data)); if (bufs.all_empty()) { scheduler_.post_immediate_completion(op, is_continuation); return; } async_manager_.async( impl.socket_->InputStream->ReadAsync( bufs.buffers()[0], bufs.buffers()[0]->Capacity, Windows::Storage::Streams::InputStreamOptions::Partial), op); } catch (Platform::Exception^ e) { op->ec_ = asio::error_code(e->HResult, asio::system_category()); scheduler_.post_immediate_completion(op, is_continuation); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP galera-4-26.4.25/asio/asio/detail/impl/resolver_service_base.ipp000644 000164 177776 00000007341 15107057155 025677 0ustar00jenkinsnogroup000000 000000 // // detail/impl/resolver_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/resolver_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base::work_scheduler_runner { public: work_scheduler_runner(scheduler_impl& work_scheduler) : work_scheduler_(work_scheduler) { } void operator()() { asio::error_code ec; work_scheduler_.run(ec); } private: scheduler_impl& work_scheduler_; }; resolver_service_base::resolver_service_base(execution_context& context) : scheduler_(asio::use_service(context)), work_scheduler_(new scheduler_impl(context, -1, false)), work_thread_(0) { work_scheduler_->work_started(); } resolver_service_base::~resolver_service_base() { base_shutdown(); } void resolver_service_base::base_shutdown() { if (work_scheduler_.get()) { work_scheduler_->work_finished(); work_scheduler_->stop(); if (work_thread_.get()) { work_thread_->join(); work_thread_.reset(); } work_scheduler_.reset(); } } void resolver_service_base::base_notify_fork( execution_context::fork_event fork_ev) { if (work_thread_.get()) { if (fork_ev == execution_context::fork_prepare) { work_scheduler_->stop(); work_thread_->join(); work_thread_.reset(); } else { work_scheduler_->restart(); work_thread_.reset(new asio::detail::thread( work_scheduler_runner(*work_scheduler_))); } } } void resolver_service_base::construct( resolver_service_base::implementation_type& impl) { impl.reset(static_cast(0), socket_ops::noop_deleter()); } void resolver_service_base::destroy( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION((scheduler_.context(), "resolver", &impl, 0, "cancel")); impl.reset(); } void 
resolver_service_base::move_construct(implementation_type& impl, implementation_type& other_impl) { impl = ASIO_MOVE_CAST(implementation_type)(other_impl); } void resolver_service_base::move_assign(implementation_type& impl, resolver_service_base&, implementation_type& other_impl) { destroy(impl); impl = ASIO_MOVE_CAST(implementation_type)(other_impl); } void resolver_service_base::cancel( resolver_service_base::implementation_type& impl) { ASIO_HANDLER_OPERATION((scheduler_.context(), "resolver", &impl, 0, "cancel")); impl.reset(static_cast(0), socket_ops::noop_deleter()); } void resolver_service_base::start_resolve_op(resolve_op* op) { if (ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, scheduler_.concurrency_hint())) { start_work_thread(); scheduler_.work_started(); work_scheduler_->post_immediate_completion(op, false); } else { op->ec_ = asio::error::operation_not_supported; scheduler_.post_immediate_completion(op, false); } } void resolver_service_base::start_work_thread() { asio::detail::mutex::scoped_lock lock(mutex_); if (!work_thread_.get()) { work_thread_.reset(new asio::detail::thread( work_scheduler_runner(*work_scheduler_))); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP galera-4-26.4.25/asio/asio/detail/impl/reactive_descriptor_service.ipp000644 000164 177776 00000013730 15107057155 027103 0ustar00jenkinsnogroup000000 000000 // // detail/impl/reactive_descriptor_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/error.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_descriptor_service::reactive_descriptor_service( execution_context& context) : execution_context_service_base(context), reactor_(asio::use_service(context)) { reactor_.init_task(); } void reactive_descriptor_service::shutdown() { } void reactive_descriptor_service::construct( reactive_descriptor_service::implementation_type& impl) { impl.descriptor_ = -1; impl.state_ = 0; } void reactive_descriptor_service::move_construct( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service::implementation_type& other_impl) { impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::move_assign( reactive_descriptor_service::implementation_type& impl, reactive_descriptor_service& other_service, reactive_descriptor_service::implementation_type& other_impl) { destroy(impl); impl.descriptor_ = other_impl.descriptor_; other_impl.descriptor_ = -1; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.descriptor_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_descriptor_service::destroy( reactive_descriptor_service::implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((reactor_.context(), 
"descriptor", &impl, impl.descriptor_, "close")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); asio::error_code ignored_ec; descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec); reactor_.cleanup_descriptor_data(impl.reactor_data_); } } asio::error_code reactive_descriptor_service::assign( reactive_descriptor_service::implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (int err = reactor_.register_descriptor( native_descriptor, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.descriptor_ = native_descriptor; impl.state_ = descriptor_ops::possible_dup; ec = asio::error_code(); return ec; } asio::error_code reactive_descriptor_service::close( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((reactor_.context(), "descriptor", &impl, impl.descriptor_, "close")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, (impl.state_ & descriptor_ops::possible_dup) == 0); descriptor_ops::close(impl.descriptor_, impl.state_, ec); reactor_.cleanup_descriptor_data(impl.reactor_data_); } else { ec = asio::error_code(); } // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour.) 
construct(impl); return ec; } reactive_descriptor_service::native_handle_type reactive_descriptor_service::release( reactive_descriptor_service::implementation_type& impl) { native_handle_type descriptor = impl.descriptor_; if (is_open(impl)) { ASIO_HANDLER_OPERATION((reactor_.context(), "descriptor", &impl, impl.descriptor_, "release")); reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false); reactor_.cleanup_descriptor_data(impl.reactor_data_); construct(impl); } return descriptor; } asio::error_code reactive_descriptor_service::cancel( reactive_descriptor_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION((reactor_.context(), "descriptor", &impl, impl.descriptor_, "cancel")); reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_); ec = asio::error_code(); return ec; } void reactive_descriptor_service::start_op( reactive_descriptor_service::implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & descriptor_ops::non_blocking) || descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.descriptor_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/socket_ops.ipp000644 000164 177776 00000302256 15107057155 023500 0ustar00jenkinsnogroup000000 000000 // // detail/impl/socket_ops.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_IPP #define ASIO_DETAIL_SOCKET_OPS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include "asio/detail/assert.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) \ || defined(__MACH__) && defined(__APPLE__) # if defined(ASIO_HAS_PTHREADS) # include # endif // defined(ASIO_HAS_PTHREADS) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // || defined(__MACH__) && defined(__APPLE__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) struct msghdr { int msg_namelen; }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) // HP-UX doesn't declare these functions extern "C", so they are declared again // here to avoid linker errors about undefined symbols. 
extern "C" char* if_indextoname(unsigned int, char*); extern "C" unsigned int if_nametoindex(const char*); #endif // defined(__hpux) #endif // !defined(ASIO_WINDOWS_RUNTIME) inline void clear_last_error() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) WSASetLastError(0); #else errno = 0; #endif } #if !defined(ASIO_WINDOWS_RUNTIME) template inline ReturnType error_wrapper(ReturnType return_value, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(WSAGetLastError(), asio::error::get_system_category()); #else ec = asio::error_code(errno, asio::error::get_system_category()); #endif return return_value; } template inline socket_type call_accept(SockLenType msghdr::*, socket_type s, socket_addr_type* addr, std::size_t* addrlen) { SockLenType tmp_addrlen = addrlen ? (SockLenType)*addrlen : 0; socket_type result = ::accept(s, addr, addrlen ? &tmp_addrlen : 0); if (addrlen) *addrlen = (std::size_t)tmp_addrlen; return result; } socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return invalid_socket; } clear_last_error(); socket_type new_s = error_wrapper(call_accept( &msghdr::msg_namelen, s, addr, addrlen), ec); if (new_s == invalid_socket) return new_s; #if defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__) int optval = 1; int result = error_wrapper(::setsockopt(new_s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec); if (result != 0) { ::close(new_s); return invalid_socket; } #endif ec = asio::error_code(); return new_s; } socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec) { // Accept a socket. for (;;) { // Try to complete the operation without blocking. socket_type new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. 
if (new_socket != invalid_socket) return new_socket; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { if (state & user_set_non_blocking) return invalid_socket; // Fall through to retry operation. } else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return invalid_socket; // Fall through to retry operation. } #endif // defined(EPROTO) else return invalid_socket; // Wait for socket to become ready. if (socket_ops::poll_read(s, 0, -1, ec) < 0) return invalid_socket; } } #if defined(ASIO_HAS_IOCP) void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec) { // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_NETNAME_DELETED) ec = asio::error::connection_aborted; if (!ec) { // Get the address of the peer. if (addr && addrlen) { LPSOCKADDR local_addr = 0; int local_addr_length = 0; LPSOCKADDR remote_addr = 0; int remote_addr_length = 0; GetAcceptExSockaddrs(output_buffer, 0, address_length, address_length, &local_addr, &local_addr_length, &remote_addr, &remote_addr_length); if (static_cast(remote_addr_length) > *addrlen) { ec = asio::error::invalid_argument; } else { using namespace std; // For memcpy. memcpy(addr, remote_addr, remote_addr_length); *addrlen = static_cast(remote_addr_length); } } // Need to set the SO_UPDATE_ACCEPT_CONTEXT option so that getsockname // and getpeername will work on the accepted socket. 
SOCKET update_ctx_param = s; socket_ops::state_type state = 0; socket_ops::setsockopt(new_socket, state, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, &update_ctx_param, sizeof(SOCKET), ec); } } #else // defined(ASIO_HAS_IOCP) bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket) { for (;;) { // Accept the waiting connection. new_socket = socket_ops::accept(s, addr, addrlen, ec); // Check if operation succeeded. if (new_socket != invalid_socket) return true; // Retry operation if interrupted by signal. if (ec == asio::error::interrupted) continue; // Operation failed. if (ec == asio::error::would_block || ec == asio::error::try_again) { // Fall through to retry operation. } else if (ec == asio::error::connection_aborted) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #if defined(EPROTO) else if (ec.value() == EPROTO) { if (state & enable_connection_aborted) return true; // Fall through to retry operation. } #endif // defined(EPROTO) else return true; return false; } } #endif // defined(ASIO_HAS_IOCP) template inline int call_bind(SockLenType msghdr::*, socket_type s, const socket_addr_type* addr, std::size_t addrlen) { return ::bind(s, addr, (SockLenType)addrlen); } int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } clear_last_error(); int result = error_wrapper(call_bind( &msghdr::msg_namelen, s, addr, addrlen), ec); if (result == 0) ec = asio::error_code(); return result; } int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec) { int result = 0; if (s != invalid_socket) { // We don't want the destructor to block, so set the socket to linger in // the background. If the user doesn't like this behaviour then they need // to explicitly close the socket. 
    if (destruction && (state & user_set_linger))
    {
      ::linger opt;
      opt.l_onoff = 0;
      opt.l_linger = 0;
      asio::error_code ignored_ec;
      socket_ops::setsockopt(s, state, SOL_SOCKET,
          SO_LINGER, &opt, sizeof(opt), ignored_ec);
    }

    clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
    result = error_wrapper(::closesocket(s), ec);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
    result = error_wrapper(::close(s), ec);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)

    if (result != 0
        && (ec == asio::error::would_block
          || ec == asio::error::try_again))
    {
      // According to UNIX Network Programming Vol. 1, it is possible for
      // close() to fail with EWOULDBLOCK under certain circumstances. What
      // isn't clear is the state of the descriptor after this error. The one
      // current OS where this behaviour is seen, Windows, says that the socket
      // remains open. Therefore we'll put the descriptor back into blocking
      // mode and have another attempt at closing it.
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
      ioctl_arg_type arg = 0;
      ::ioctlsocket(s, FIONBIO, &arg);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# if defined(__SYMBIAN32__)
      int flags = ::fcntl(s, F_GETFL, 0);
      if (flags >= 0)
        ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
# else // defined(__SYMBIAN32__)
      ioctl_arg_type arg = 0;
      ::ioctl(s, FIONBIO, &arg);
# endif // defined(__SYMBIAN32__)
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
      state &= ~non_blocking;

      clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
      result = error_wrapper(::closesocket(s), ec);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
      result = error_wrapper(::close(s), ec);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
    }
  }

  if (result == 0)
    ec = asio::error_code();
  return result;
}

// Set or clear user-requested non-blocking mode, keeping the cached state
// flags in sync with the OS-level mode.
bool set_user_non_blocking(socket_type s,
    state_type& state, bool value, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return false;
  }

  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  ioctl_arg_type arg = (value ? 1 : 0);
  int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
#elif defined(__SYMBIAN32__)
  int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
  if (result >= 0)
  {
    clear_last_error();
    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
    result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
  }
#else
  ioctl_arg_type arg = (value ? 1 : 0);
  int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
#endif

  if (result >= 0)
  {
    ec = asio::error_code();
    if (value)
      state |= user_set_non_blocking;
    else
    {
      // Clearing the user-set non-blocking mode always overrides any
      // internally-set non-blocking flag. Any subsequent asynchronous
      // operations will need to re-enable non-blocking I/O.
      state &= ~(user_set_non_blocking | internal_non_blocking);
    }
    return true;
  }

  return false;
}

// Set or clear library-driven (internal) non-blocking mode. Refuses to clear
// it while the user-set non-blocking flag is still active.
bool set_internal_non_blocking(socket_type s,
    state_type& state, bool value, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return false;
  }

  if (!value && (state & user_set_non_blocking))
  {
    // It does not make sense to clear the internal non-blocking flag if the
    // user still wants non-blocking behaviour. Return an error and let the
    // caller figure out whether to update the user-set non-blocking flag.
    ec = asio::error::invalid_argument;
    return false;
  }

  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  ioctl_arg_type arg = (value ? 1 : 0);
  int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
#elif defined(__SYMBIAN32__)
  int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
  if (result >= 0)
  {
    clear_last_error();
    int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
    result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
  }
#else
  ioctl_arg_type arg = (value ? 1 : 0);
  int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
#endif

  if (result >= 0)
  {
    ec = asio::error_code();
    if (value)
      state |= internal_non_blocking;
    else
      state &= ~internal_non_blocking;
    return true;
  }

  return false;
}

// Shut down one or both directions of a connected socket.
int shutdown(socket_type s, int what, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  clear_last_error();
  int result = error_wrapper(::shutdown(s, what), ec);
  if (result == 0)
    ec = asio::error_code();
  return result;
}

// Adapter that invokes ::connect() with the platform's socket length type.
template <typename SockLenType>
inline int call_connect(SockLenType msghdr::*,
    socket_type s, const socket_addr_type* addr, std::size_t addrlen)
{
  return ::connect(s, addr, (SockLenType)addrlen);
}

// Initiate a connection. On Linux, EAGAIN from connect() indicates ephemeral
// port exhaustion, so it is mapped to no_buffer_space for portability.
int connect(socket_type s, const socket_addr_type* addr,
    std::size_t addrlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  clear_last_error();
  int result = error_wrapper(call_connect(
        &msghdr::msg_namelen, s, addr, addrlen), ec);
  if (result == 0)
    ec = asio::error_code();
#if defined(__linux__)
  else if (ec == asio::error::try_again)
    ec = asio::error::no_buffer_space;
#endif // defined(__linux__)
  return result;
}

// Blocking connect: start the connect, wait (indefinitely) for the socket to
// become writable, then retrieve the final status via SO_ERROR.
void sync_connect(socket_type s, const socket_addr_type* addr,
    std::size_t addrlen, asio::error_code& ec)
{
  // Perform the connect operation.
  socket_ops::connect(s, addr, addrlen, ec);
  if (ec != asio::error::in_progress
      && ec != asio::error::would_block)
  {
    // The connect operation finished immediately.
    return;
  }

  // Wait for socket to become ready.
  if (socket_ops::poll_connect(s, -1, ec) < 0)
    return;

  // Get the error code from the connect operation.
  int connect_error = 0;
  size_t connect_error_len = sizeof(connect_error);
  if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
        &connect_error, &connect_error_len, ec) == socket_error_retval)
    return;

  // Return the result of the connect operation.
  ec = asio::error_code(connect_error,
      asio::error::get_system_category());
}

#if defined(ASIO_HAS_IOCP)

// Post-process an overlapped (IOCP) connect: map Windows-specific errors to
// portable ones and refresh the socket's context so that getsockname and
// getpeername work afterwards.
void complete_iocp_connect(socket_type s, asio::error_code& ec)
{
  // Map non-portable errors to their portable counterparts.
  switch (ec.value())
  {
  case ERROR_CONNECTION_REFUSED:
    ec = asio::error::connection_refused;
    break;
  case ERROR_NETWORK_UNREACHABLE:
    ec = asio::error::network_unreachable;
    break;
  case ERROR_HOST_UNREACHABLE:
    ec = asio::error::host_unreachable;
    break;
  case ERROR_SEM_TIMEOUT:
    ec = asio::error::timed_out;
    break;
  default:
    break;
  }

  if (!ec)
  {
    // Need to set the SO_UPDATE_CONNECT_CONTEXT option so that getsockname
    // and getpeername will work on the connected socket.
    socket_ops::state_type state = 0;
    const int so_update_connect_context = 0x7010;
    socket_ops::setsockopt(s, state, SOL_SOCKET,
        so_update_connect_context, 0, 0, ec);
  }
}

#endif // defined(ASIO_HAS_IOCP)

// Check whether an asynchronous connect has completed. Returns false while
// the connect is still in progress; otherwise stores the final status (from
// SO_ERROR) in ec and returns true.
bool non_blocking_connect(socket_type s, asio::error_code& ec)
{
  // Check if the connect operation has finished. This is required since we may
  // get spurious readiness notifications from the reactor.
#if defined(ASIO_WINDOWS) \
  || defined(__CYGWIN__) \
  || defined(__SYMBIAN32__)
  fd_set write_fds;
  FD_ZERO(&write_fds);
  FD_SET(s, &write_fds);
  fd_set except_fds;
  FD_ZERO(&except_fds);
  FD_SET(s, &except_fds);
  timeval zero_timeout;
  zero_timeout.tv_sec = 0;
  zero_timeout.tv_usec = 0;
  int ready = ::select(s + 1, 0, &write_fds, &except_fds, &zero_timeout);
#else // defined(ASIO_WINDOWS)
      // || defined(__CYGWIN__)
      // || defined(__SYMBIAN32__)
  pollfd fds;
  fds.fd = s;
  fds.events = POLLOUT;
  fds.revents = 0;
  int ready = ::poll(&fds, 1, 0);
#endif // defined(ASIO_WINDOWS)
       // || defined(__CYGWIN__)
       // || defined(__SYMBIAN32__)
  if (ready == 0)
  {
    // The asynchronous connect operation is still in progress.
    return false;
  }

  // Get the error code from the connect operation.
  int connect_error = 0;
  size_t connect_error_len = sizeof(connect_error);
  if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_ERROR,
        &connect_error, &connect_error_len, ec) == 0)
  {
    if (connect_error)
    {
      ec = asio::error_code(connect_error,
          asio::error::get_system_category());
    }
    else
      ec = asio::error_code();
  }

  return true;
}

// Create a connected pair of sockets (POSIX only; reported as unsupported on
// Windows/Cygwin).
int socketpair(int af, int type, int protocol,
    socket_type sv[2], asio::error_code& ec)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  (void)(af);
  (void)(type);
  (void)(protocol);
  (void)(sv);
  ec = asio::error::operation_not_supported;
  return socket_error_retval;
#else
  clear_last_error();
  int result = error_wrapper(::socketpair(af, type, protocol, sv), ec);
  if (result == 0)
    ec = asio::error_code();
  return result;
#endif
}

// Test whether the socket's read pointer is at the out-of-band data mark.
bool sockatmark(socket_type s, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return false;
  }

#if defined(SIOCATMARK)
  ioctl_arg_type value = 0;
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  int result = error_wrapper(::ioctlsocket(s, SIOCATMARK, &value), ec);
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  int result = error_wrapper(::ioctl(s, SIOCATMARK, &value), ec);
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  if (result == 0)
    ec = asio::error_code();
# if defined(ENOTTY)
  // ENOTTY from ioctl indicates the descriptor is not a socket.
  if (ec.value() == ENOTTY)
    ec = asio::error::not_socket;
# endif // defined(ENOTTY)
#else // defined(SIOCATMARK)
  int value = error_wrapper(::sockatmark(s), ec);
  if (value != -1)
    ec = asio::error_code();
#endif // defined(SIOCATMARK)

  return ec ? false : value != 0;
}

// Number of bytes available to read without blocking (FIONREAD).
size_t available(socket_type s, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  ioctl_arg_type value = 0;
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  int result = error_wrapper(::ioctlsocket(s, FIONREAD, &value), ec);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  int result = error_wrapper(::ioctl(s, FIONREAD, &value), ec);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  if (result == 0)
    ec = asio::error_code();
#if defined(ENOTTY)
  if (ec.value() == ENOTTY)
    ec = asio::error::not_socket;
#endif // defined(ENOTTY)

  return ec ? static_cast<size_t>(0) : static_cast<size_t>(value);
}

// Mark a socket as listening for incoming connections.
int listen(socket_type s, int backlog, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  clear_last_error();
  int result = error_wrapper(::listen(s, backlog), ec);
  if (result == 0)
    ec = asio::error_code();
  return result;
}

// Helpers to assign a scatter/gather buffer base pointer regardless of
// whether the platform declares iov_base as void* or some other pointer type.
inline void init_buf_iov_base(void*& base, void* addr)
{
  base = addr;
}

template <typename T>
inline void init_buf_iov_base(T& base, void* addr)
{
  base = static_cast<T>(addr);
}

#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
typedef WSABUF buf;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
typedef iovec buf;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)

// Initialise a platform buffer descriptor from a mutable data pointer.
void init_buf(buf& b, void* data, size_t size)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  b.buf = static_cast<char*>(data);
  b.len = static_cast<u_long>(size);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  init_buf_iov_base(b.iov_base, data);
  b.iov_len = size;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Initialise a platform buffer descriptor from a const data pointer.
void init_buf(buf& b, const void* data, size_t size)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  b.buf = static_cast<char*>(const_cast<void*>(data));
  b.len = static_cast<u_long>(size);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  init_buf_iov_base(b.iov_base, const_cast<void*>(data));
  b.iov_len = size;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Helpers to assign msghdr::msg_name, which may be declared as void* or as
// another pointer type depending on the platform.
inline void init_msghdr_msg_name(void*& name, socket_addr_type* addr)
{
  name = addr;
}

inline void init_msghdr_msg_name(void*& name, const socket_addr_type* addr)
{
  name = const_cast<socket_addr_type*>(addr);
}

template <typename T>
inline void init_msghdr_msg_name(T& name, socket_addr_type* addr)
{
  name = reinterpret_cast<T>(addr);
}

template <typename T>
inline void init_msghdr_msg_name(T& name, const socket_addr_type* addr)
{
  name = reinterpret_cast<T>(const_cast<socket_addr_type*>(addr));
}

// Scatter-read from a socket (WSARecv on Windows, recvmsg elsewhere), mapping
// platform-specific error codes to portable ones.
signed_size_type recv(socket_type s, buf* bufs, size_t count,
    int flags, asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  // Receive some data.
  DWORD recv_buf_count = static_cast<DWORD>(count);
  DWORD bytes_transferred = 0;
  DWORD recv_flags = flags;
  int result = error_wrapper(::WSARecv(s, bufs,
        recv_buf_count, &bytes_transferred, &recv_flags, 0, 0), ec);
  if (ec.value() == ERROR_NETNAME_DELETED)
    ec = asio::error::connection_reset;
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
    ec = asio::error::connection_refused;
  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
    result = 0;
  if (result != 0)
    return socket_error_retval;
  ec = asio::error_code();
  return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  msghdr msg = msghdr();
  msg.msg_iov = bufs;
  msg.msg_iovlen = static_cast<int>(count);
  signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec);
  if (result >= 0)
    ec = asio::error_code();
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Blocking receive: retry until data arrives, EOF is seen on a stream socket,
// or a hard error occurs; polls for readability between attempts.
size_t sync_recv(socket_type s, state_type state, buf* bufs,
    size_t count, int flags, bool all_empty, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  // A request to read 0 bytes on a stream is a no-op.
  if (all_empty && (state & stream_oriented))
  {
    ec = asio::error_code();
    return 0;
  }

  // Read some data.
  for (;;)
  {
    // Try to complete the operation without blocking.
    signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);

    // Check if operation succeeded.
    if (bytes > 0)
      return bytes;

    // Check for EOF.
    if ((state & stream_oriented) && bytes == 0)
    {
      ec = asio::error::eof;
      return 0;
    }

    // Operation failed.
    if ((state & user_set_non_blocking)
        || (ec != asio::error::would_block
          && ec != asio::error::try_again))
      return 0;

    // Wait for socket to become ready.
    if (socket_ops::poll_read(s, 0, -1, ec) < 0)
      return 0;
  }
}

#if defined(ASIO_HAS_IOCP)

// Post-process an overlapped (IOCP) receive: map Windows errors to portable
// ones and detect end-of-stream on stream-oriented sockets.
void complete_iocp_recv(state_type state,
    const weak_cancel_token_type& cancel_token, bool all_empty,
    asio::error_code& ec, size_t bytes_transferred)
{
  // Map non-portable errors to their portable counterparts.
  if (ec.value() == ERROR_NETNAME_DELETED)
  {
    // An expired cancel token means the owning socket was destroyed.
    if (cancel_token.expired())
      ec = asio::error::operation_aborted;
    else
      ec = asio::error::connection_reset;
  }
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
  {
    ec = asio::error::connection_refused;
  }
  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
  {
    ec.assign(0, ec.category());
  }

  // Check for connection closed.
  else if (!ec && bytes_transferred == 0
      && (state & stream_oriented) != 0
      && !all_empty)
  {
    ec = asio::error::eof;
  }
}

#else // defined(ASIO_HAS_IOCP)

// Reactor-based receive attempt. Returns true when the operation has
// completed, false when it should be retried on the next readiness event.
bool non_blocking_recv(socket_type s,
    buf* bufs, size_t count, int flags, bool is_stream,
    asio::error_code& ec, size_t& bytes_transferred)
{
  for (;;)
  {
    // Read some data.
    signed_size_type bytes = socket_ops::recv(s, bufs, count, flags, ec);

    // Check for end of stream.
    if (is_stream && bytes == 0)
    {
      ec = asio::error::eof;
      return true;
    }

    // Retry operation if interrupted by signal.
    if (ec == asio::error::interrupted)
      continue;

    // Check if we need to run the operation again.
    if (ec == asio::error::would_block
        || ec == asio::error::try_again)
      return false;

    // Operation is complete.
    if (bytes >= 0)
    {
      ec = asio::error_code();
      bytes_transferred = bytes;
    }
    else
      bytes_transferred = 0;

    return true;
  }
}

#endif // defined(ASIO_HAS_IOCP)

// Scatter-read a datagram, capturing the sender's address in addr/addrlen.
signed_size_type recvfrom(socket_type s, buf* bufs, size_t count,
    int flags, socket_addr_type* addr, std::size_t* addrlen,
    asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  // Receive some data.
  DWORD recv_buf_count = static_cast<DWORD>(count);
  DWORD bytes_transferred = 0;
  DWORD recv_flags = flags;
  int tmp_addrlen = (int)*addrlen;
  int result = error_wrapper(::WSARecvFrom(s, bufs, recv_buf_count,
        &bytes_transferred, &recv_flags, addr, &tmp_addrlen, 0, 0), ec);
  *addrlen = (std::size_t)tmp_addrlen;
  if (ec.value() == ERROR_NETNAME_DELETED)
    ec = asio::error::connection_reset;
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
    ec = asio::error::connection_refused;
  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
    result = 0;
  if (result != 0)
    return socket_error_retval;
  ec = asio::error_code();
  return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  msghdr msg = msghdr();
  init_msghdr_msg_name(msg.msg_name, addr);
  msg.msg_namelen = static_cast<int>(*addrlen);
  msg.msg_iov = bufs;
  msg.msg_iovlen = static_cast<int>(count);
  signed_size_type result = error_wrapper(::recvmsg(s, &msg, flags), ec);
  *addrlen = msg.msg_namelen;
  if (result >= 0)
    ec = asio::error_code();
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Blocking recvfrom: retry until a datagram arrives or a hard error occurs;
// polls for readability between attempts.
size_t sync_recvfrom(socket_type s, state_type state, buf* bufs,
    size_t count, int flags, socket_addr_type* addr,
    std::size_t* addrlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  // Read some data.
  for (;;)
  {
    // Try to complete the operation without blocking.
    signed_size_type bytes = socket_ops::recvfrom(
        s, bufs, count, flags, addr, addrlen, ec);

    // Check if operation succeeded.
    if (bytes >= 0)
      return bytes;

    // Operation failed.
    if ((state & user_set_non_blocking)
        || (ec != asio::error::would_block
          && ec != asio::error::try_again))
      return 0;

    // Wait for socket to become ready.
    if (socket_ops::poll_read(s, 0, -1, ec) < 0)
      return 0;
  }
}

#if defined(ASIO_HAS_IOCP)

// Post-process an overlapped (IOCP) recvfrom: map Windows errors to portable
// counterparts.
void complete_iocp_recvfrom(
    const weak_cancel_token_type& cancel_token,
    asio::error_code& ec)
{
  // Map non-portable errors to their portable counterparts.
  if (ec.value() == ERROR_NETNAME_DELETED)
  {
    if (cancel_token.expired())
      ec = asio::error::operation_aborted;
    else
      ec = asio::error::connection_reset;
  }
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
  {
    ec = asio::error::connection_refused;
  }
  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
  {
    ec.assign(0, ec.category());
  }
}

#else // defined(ASIO_HAS_IOCP)

// Reactor-based recvfrom attempt. Returns true when complete, false when it
// should be retried on the next readiness event.
bool non_blocking_recvfrom(socket_type s,
    buf* bufs, size_t count, int flags,
    socket_addr_type* addr, std::size_t* addrlen,
    asio::error_code& ec, size_t& bytes_transferred)
{
  for (;;)
  {
    // Read some data.
    signed_size_type bytes = socket_ops::recvfrom(
        s, bufs, count, flags, addr, addrlen, ec);

    // Retry operation if interrupted by signal.
    if (ec == asio::error::interrupted)
      continue;

    // Check if we need to run the operation again.
    if (ec == asio::error::would_block
        || ec == asio::error::try_again)
      return false;

    // Operation is complete.
    if (bytes >= 0)
    {
      ec = asio::error_code();
      bytes_transferred = bytes;
    }
    else
      bytes_transferred = 0;

    return true;
  }
}

#endif // defined(ASIO_HAS_IOCP)

// Receive a message with ancillary flags (recvmsg); emulated via plain recv
// on Windows, where out_flags is always reported as zero.
signed_size_type recvmsg(socket_type s, buf* bufs, size_t count,
    int in_flags, int& out_flags, asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  out_flags = 0;
  return socket_ops::recv(s, bufs, count, in_flags, ec);
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  msghdr msg = msghdr();
  msg.msg_iov = bufs;
  msg.msg_iovlen = static_cast<int>(count);
  signed_size_type result = error_wrapper(::recvmsg(s, &msg, in_flags), ec);
  if (result >= 0)
  {
    ec = asio::error_code();
    out_flags = msg.msg_flags;
  }
  else
    out_flags = 0;
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Blocking recvmsg: retry until data arrives or a hard error occurs; polls
// for readability between attempts.
size_t sync_recvmsg(socket_type s, state_type state,
    buf* bufs, size_t count, int in_flags, int& out_flags,
    asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  // Read some data.
  for (;;)
  {
    // Try to complete the operation without blocking.
    signed_size_type bytes = socket_ops::recvmsg(
        s, bufs, count, in_flags, out_flags, ec);

    // Check if operation succeeded.
    if (bytes >= 0)
      return bytes;

    // Operation failed.
    if ((state & user_set_non_blocking)
        || (ec != asio::error::would_block
          && ec != asio::error::try_again))
      return 0;

    // Wait for socket to become ready.
    if (socket_ops::poll_read(s, 0, -1, ec) < 0)
      return 0;
  }
}

#if defined(ASIO_HAS_IOCP)

// Post-process an overlapped (IOCP) recvmsg.
void complete_iocp_recvmsg(
    const weak_cancel_token_type& cancel_token,
    asio::error_code& ec)
{
  // Map non-portable errors to their portable counterparts.
  if (ec.value() == ERROR_NETNAME_DELETED)
  {
    // An expired cancel token means the owning socket was destroyed.
    if (cancel_token.expired())
      ec = asio::error::operation_aborted;
    else
      ec = asio::error::connection_reset;
  }
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
  {
    ec = asio::error::connection_refused;
  }
  else if (ec.value() == WSAEMSGSIZE || ec.value() == ERROR_MORE_DATA)
  {
    ec.assign(0, ec.category());
  }
}

#else // defined(ASIO_HAS_IOCP)

// Reactor-based recvmsg attempt. Returns true when complete, false when it
// should be retried on the next readiness event.
bool non_blocking_recvmsg(socket_type s,
    buf* bufs, size_t count, int in_flags, int& out_flags,
    asio::error_code& ec, size_t& bytes_transferred)
{
  for (;;)
  {
    // Read some data.
    signed_size_type bytes = socket_ops::recvmsg(
        s, bufs, count, in_flags, out_flags, ec);

    // Retry operation if interrupted by signal.
    if (ec == asio::error::interrupted)
      continue;

    // Check if we need to run the operation again.
    if (ec == asio::error::would_block
        || ec == asio::error::try_again)
      return false;

    // Operation is complete.
    if (bytes >= 0)
    {
      ec = asio::error_code();
      bytes_transferred = bytes;
    }
    else
      bytes_transferred = 0;

    return true;
  }
}

#endif // defined(ASIO_HAS_IOCP)

// Gather-write to a socket (WSASend on Windows, sendmsg elsewhere). On Linux
// MSG_NOSIGNAL is added so that writing to a reset connection produces an
// error instead of raising SIGPIPE.
signed_size_type send(socket_type s, const buf* bufs, size_t count,
    int flags, asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  // Send the data.
  DWORD send_buf_count = static_cast<DWORD>(count);
  DWORD bytes_transferred = 0;
  DWORD send_flags = flags;
  int result = error_wrapper(::WSASend(s, const_cast<buf*>(bufs),
        send_buf_count, &bytes_transferred, send_flags, 0, 0), ec);
  if (ec.value() == ERROR_NETNAME_DELETED)
    ec = asio::error::connection_reset;
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
    ec = asio::error::connection_refused;
  if (result != 0)
    return socket_error_retval;
  ec = asio::error_code();
  return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  msghdr msg = msghdr();
  msg.msg_iov = const_cast<buf*>(bufs);
  msg.msg_iovlen = static_cast<int>(count);
#if defined(__linux__)
  flags |= MSG_NOSIGNAL;
#endif // defined(__linux__)
  signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec);
  if (result >= 0)
    ec = asio::error_code();
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Blocking send: retry until the data is accepted or a hard error occurs;
// polls for writability between attempts.
size_t sync_send(socket_type s, state_type state, const buf* bufs,
    size_t count, int flags, bool all_empty, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  // A request to write 0 bytes to a stream is a no-op.
  if (all_empty && (state & stream_oriented))
  {
    ec = asio::error_code();
    return 0;
  }

  // Read some data.
  for (;;)
  {
    // Try to complete the operation without blocking.
    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);

    // Check if operation succeeded.
    if (bytes >= 0)
      return bytes;

    // Operation failed.
    if ((state & user_set_non_blocking)
        || (ec != asio::error::would_block
          && ec != asio::error::try_again))
      return 0;

    // Wait for socket to become ready.
    if (socket_ops::poll_write(s, 0, -1, ec) < 0)
      return 0;
  }
}

#if defined(ASIO_HAS_IOCP)

// Post-process an overlapped (IOCP) send: map Windows errors to portable
// counterparts.
void complete_iocp_send(
    const weak_cancel_token_type& cancel_token,
    asio::error_code& ec)
{
  // Map non-portable errors to their portable counterparts.
  if (ec.value() == ERROR_NETNAME_DELETED)
  {
    if (cancel_token.expired())
      ec = asio::error::operation_aborted;
    else
      ec = asio::error::connection_reset;
  }
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
  {
    ec = asio::error::connection_refused;
  }
}

#else // defined(ASIO_HAS_IOCP)

// Reactor-based send attempt. Returns true when complete, false when it
// should be retried on the next writability event.
bool non_blocking_send(socket_type s,
    const buf* bufs, size_t count, int flags,
    asio::error_code& ec, size_t& bytes_transferred)
{
  for (;;)
  {
    // Write some data.
    signed_size_type bytes = socket_ops::send(s, bufs, count, flags, ec);

    // Retry operation if interrupted by signal.
    if (ec == asio::error::interrupted)
      continue;

    // Check if we need to run the operation again.
    if (ec == asio::error::would_block
        || ec == asio::error::try_again)
      return false;

    // Operation is complete.
    if (bytes >= 0)
    {
      ec = asio::error_code();
      bytes_transferred = bytes;
    }
    else
      bytes_transferred = 0;

    return true;
  }
}

#endif // defined(ASIO_HAS_IOCP)

// Gather-write a datagram to a specific destination address.
signed_size_type sendto(socket_type s, const buf* bufs, size_t count,
    int flags, const socket_addr_type* addr, std::size_t addrlen,
    asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  // Send the data.
  DWORD send_buf_count = static_cast<DWORD>(count);
  DWORD bytes_transferred = 0;
  int result = error_wrapper(::WSASendTo(s, const_cast<buf*>(bufs),
        send_buf_count, &bytes_transferred, flags, addr,
        static_cast<int>(addrlen), 0, 0), ec);
  if (ec.value() == ERROR_NETNAME_DELETED)
    ec = asio::error::connection_reset;
  else if (ec.value() == ERROR_PORT_UNREACHABLE)
    ec = asio::error::connection_refused;
  if (result != 0)
    return socket_error_retval;
  ec = asio::error_code();
  return bytes_transferred;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  msghdr msg = msghdr();
  init_msghdr_msg_name(msg.msg_name, addr);
  msg.msg_namelen = static_cast<int>(addrlen);
  msg.msg_iov = const_cast<buf*>(bufs);
  msg.msg_iovlen = static_cast<int>(count);
#if defined(__linux__)
  // Suppress SIGPIPE; a failed write reports an error code instead.
  flags |= MSG_NOSIGNAL;
#endif // defined(__linux__)
  signed_size_type result = error_wrapper(::sendmsg(s, &msg, flags), ec);
  if (result >= 0)
    ec = asio::error_code();
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Blocking sendto: retry until the datagram is accepted or a hard error
// occurs; polls for writability between attempts.
size_t sync_sendto(socket_type s, state_type state, const buf* bufs,
    size_t count, int flags, const socket_addr_type* addr,
    std::size_t addrlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return 0;
  }

  // Write some data.
  for (;;)
  {
    // Try to complete the operation without blocking.
    signed_size_type bytes = socket_ops::sendto(
        s, bufs, count, flags, addr, addrlen, ec);

    // Check if operation succeeded.
    if (bytes >= 0)
      return bytes;

    // Operation failed.
    if ((state & user_set_non_blocking)
        || (ec != asio::error::would_block
          && ec != asio::error::try_again))
      return 0;

    // Wait for socket to become ready.
    if (socket_ops::poll_write(s, 0, -1, ec) < 0)
      return 0;
  }
}

#if !defined(ASIO_HAS_IOCP)

// Reactor-based sendto attempt. Returns true when complete, false when it
// should be retried on the next writability event.
bool non_blocking_sendto(socket_type s,
    const buf* bufs, size_t count, int flags,
    const socket_addr_type* addr, std::size_t addrlen,
    asio::error_code& ec, size_t& bytes_transferred)
{
  for (;;)
  {
    // Write some data.
    signed_size_type bytes = socket_ops::sendto(
        s, bufs, count, flags, addr, addrlen, ec);

    // Retry operation if interrupted by signal.
    if (ec == asio::error::interrupted)
      continue;

    // Check if we need to run the operation again.
    if (ec == asio::error::would_block
        || ec == asio::error::try_again)
      return false;

    // Operation is complete.
    if (bytes >= 0)
    {
      ec = asio::error_code();
      bytes_transferred = bytes;
    }
    else
      bytes_transferred = 0;

    return true;
  }
}

#endif // !defined(ASIO_HAS_IOCP)

// Create a socket. On Windows an overlapped socket is created and dual-stack
// IPv4/v6 is attempted for AF_INET6; on Mac OS/FreeBSD SO_NOSIGPIPE is set so
// writes to a broken connection do not raise SIGPIPE.
socket_type socket(int af, int type, int protocol,
    asio::error_code& ec)
{
  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  socket_type s = error_wrapper(::WSASocketW(af, type, protocol, 0, 0,
        WSA_FLAG_OVERLAPPED), ec);
  if (s == invalid_socket)
    return s;

  if (af == ASIO_OS_DEF(AF_INET6))
  {
    // Try to enable the POSIX default behaviour of having IPV6_V6ONLY set to
    // false. This will only succeed on Windows Vista and later versions of
    // Windows, where a dual-stack IPv4/v6 implementation is available.
    DWORD optval = 0;
    ::setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
        reinterpret_cast<const char*>(&optval), sizeof(optval));
  }

  ec = asio::error_code();

  return s;
#elif defined(__MACH__) && defined(__APPLE__) || defined(__FreeBSD__)
  socket_type s = error_wrapper(::socket(af, type, protocol), ec);
  if (s == invalid_socket)
    return s;

  int optval = 1;
  int result = error_wrapper(::setsockopt(s,
        SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)), ec);
  if (result != 0)
  {
    // Option could not be set: close and report failure.
    ::close(s);
    return invalid_socket;
  }

  return s;
#else
  int s = error_wrapper(::socket(af, type, protocol), ec);
  if (s >= 0)
    ec = asio::error_code();
  return s;
#endif
}

// Adapter that invokes ::setsockopt() with the platform's length type.
template <typename SockLenType>
inline int call_setsockopt(SockLenType msghdr::*,
    socket_type s, int level, int optname,
    const void* optval, std::size_t optlen)
{
  return ::setsockopt(s, level, optname,
      (const char*)optval, (SockLenType)optlen);
}

// Set a socket option. Asio's custom option level updates the cached socket
// state rather than calling into the OS. SO_REUSEADDR on datagram sockets
// additionally sets SO_REUSEPORT on BSD-based platforms for portability.
int setsockopt(socket_type s, state_type& state, int level, int optname,
    const void* optval, std::size_t optlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  if (level == custom_socket_option_level && optname == always_fail_option)
  {
    ec = asio::error::invalid_argument;
    return socket_error_retval;
  }

  if (level == custom_socket_option_level
      && optname == enable_connection_aborted_option)
  {
    if (optlen != sizeof(int))
    {
      ec = asio::error::invalid_argument;
      return socket_error_retval;
    }

    // Track the option in the cached state only; no OS call is made.
    if (*static_cast<const int*>(optval))
      state |= enable_connection_aborted;
    else
      state &= ~enable_connection_aborted;
    ec = asio::error_code();
    return 0;
  }

  if (level == SOL_SOCKET && optname == SO_LINGER)
    state |= user_set_linger;

#if defined(__BORLANDC__)
  // Mysteriously, using the getsockopt and setsockopt functions directly with
  // Borland C++ results in incorrect values being set and read. The bug can be
  // worked around by using function addresses resolved with GetProcAddress.
  if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
  {
    typedef int (WSAAPI *sso_t)(SOCKET, int, int, const char*, int);
    if (sso_t sso = (sso_t)::GetProcAddress(winsock_module, "setsockopt"))
    {
      clear_last_error();
      return error_wrapper(sso(s, level, optname,
            reinterpret_cast<const char*>(optval),
            static_cast<int>(optlen)), ec);
    }
  }
  ec = asio::error::fault;
  return socket_error_retval;
#else // defined(__BORLANDC__)
  clear_last_error();
  int result = error_wrapper(call_setsockopt(&msghdr::msg_namelen,
        s, level, optname, optval, optlen), ec);
  if (result == 0)
  {
    ec = asio::error_code();

#if defined(__MACH__) && defined(__APPLE__) \
  || defined(__NetBSD__) || defined(__FreeBSD__) \
  || defined(__OpenBSD__) || defined(__QNX__)
    // To implement portable behaviour for SO_REUSEADDR with UDP sockets we
    // need to also set SO_REUSEPORT on BSD-based platforms.
    if ((state & datagram_oriented)
        && level == SOL_SOCKET && optname == SO_REUSEADDR)
    {
      call_setsockopt(&msghdr::msg_namelen, s,
          SOL_SOCKET, SO_REUSEPORT, optval, optlen);
    }
#endif
  }

  return result;
#endif // defined(__BORLANDC__)
}

// Adapter that invokes ::getsockopt() with the platform's length type.
template <typename SockLenType>
inline int call_getsockopt(SockLenType msghdr::*,
    socket_type s, int level, int optname,
    void* optval, std::size_t* optlen)
{
  SockLenType tmp_optlen = (SockLenType)*optlen;
  int result = ::getsockopt(s, level, optname, (char*)optval, &tmp_optlen);
  *optlen = (std::size_t)tmp_optlen;
  return result;
}

// Get a socket option. Asio's custom option level is answered from the cached
// socket state without calling into the OS.
int getsockopt(socket_type s, state_type state, int level, int optname,
    void* optval, size_t* optlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  if (level == custom_socket_option_level && optname == always_fail_option)
  {
    ec = asio::error::invalid_argument;
    return socket_error_retval;
  }

  if (level == custom_socket_option_level
      && optname == enable_connection_aborted_option)
  {
    if (*optlen != sizeof(int))
    {
      ec = asio::error::invalid_argument;
      return socket_error_retval;
    }

    *static_cast<int*>(optval) = (state & enable_connection_aborted) ?
        1 : 0;
    ec = asio::error_code();
    return 0;
  }

#if defined(__BORLANDC__)
  // Mysteriously, using the getsockopt and setsockopt functions directly with
  // Borland C++ results in incorrect values being set and read. The bug can be
  // worked around by using function addresses resolved with GetProcAddress.
  if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32"))
  {
    typedef int (WSAAPI *gso_t)(SOCKET, int, int, char*, int*);
    if (gso_t gso = (gso_t)::GetProcAddress(winsock_module, "getsockopt"))
    {
      clear_last_error();
      int tmp_optlen = static_cast<int>(*optlen);
      int result = error_wrapper(gso(s, level, optname,
            reinterpret_cast<char*>(optval), &tmp_optlen), ec);
      *optlen = static_cast<size_t>(tmp_optlen);
      if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
          && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
      {
        // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are
        // only supported on Windows Vista and later. To simplify program logic
        // we will fake success of getting this option and specify that the
        // value is non-zero (i.e. true). This corresponds to the behavior of
        // IPv6 sockets on Windows platforms pre-Vista.
        *static_cast<DWORD*>(optval) = 1;
        ec = asio::error_code();
      }
      return result;
    }
  }
  ec = asio::error::fault;
  return socket_error_retval;
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  clear_last_error();
  int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,
        s, level, optname, optval, optlen), ec);
  if (result != 0 && level == IPPROTO_IPV6 && optname == IPV6_V6ONLY
      && ec.value() == WSAENOPROTOOPT && *optlen == sizeof(DWORD))
  {
    // Dual-stack IPv4/v6 sockets, and the IPV6_V6ONLY socket option, are only
    // supported on Windows Vista and later. To simplify program logic we will
    // fake success of getting this option and specify that the value is
    // non-zero (i.e. true). This corresponds to the behavior of IPv6 sockets
    // on Windows platforms pre-Vista.
    *static_cast<DWORD*>(optval) = 1;
    ec = asio::error_code();
  }
  if (result == 0)
    ec = asio::error_code();
  return result;
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  clear_last_error();
  int result = error_wrapper(call_getsockopt(&msghdr::msg_namelen,
        s, level, optname, optval, optlen), ec);
#if defined(__linux__)
  if (result == 0 && level == SOL_SOCKET && *optlen == sizeof(int)
      && (optname == SO_SNDBUF || optname == SO_RCVBUF))
  {
    // On Linux, setting SO_SNDBUF or SO_RCVBUF to N actually causes the kernel
    // to set the buffer size to N*2. Linux puts additional stuff into the
    // buffers so that only about half is actually available to the application.
    // The retrieved value is divided by 2 here to make it appear as though the
    // correct value has been set.
    *static_cast<int*>(optval) /= 2;
  }
#endif // defined(__linux__)
  if (result == 0)
    ec = asio::error_code();
  return result;
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
}

// Adapter that invokes ::getpeername() with the platform's length type.
template <typename SockLenType>
inline int call_getpeername(SockLenType msghdr::*,
    socket_type s, socket_addr_type* addr, std::size_t* addrlen)
{
  SockLenType tmp_addrlen = (SockLenType)*addrlen;
  int result = ::getpeername(s, addr, &tmp_addrlen);
  *addrlen = (std::size_t)tmp_addrlen;
  return result;
}

// Get the remote endpoint of a connected socket. On Windows, when "cached" is
// true, only verify (via SO_CONNECT_TIME) that the socket is still connected;
// the address value previously cached by the caller remains valid.
int getpeername(socket_type s, socket_addr_type* addr,
    std::size_t* addrlen, bool cached, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

#if defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP) \
  || defined(__CYGWIN__)
  if (cached)
  {
    // Check if socket is still connected.
    DWORD connect_time = 0;
    size_t connect_time_len = sizeof(connect_time);
    if (socket_ops::getsockopt(s, 0, SOL_SOCKET, SO_CONNECT_TIME,
          &connect_time, &connect_time_len, ec) == socket_error_retval)
    {
      return socket_error_retval;
    }
    if (connect_time == 0xFFFFFFFF)
    {
      // SO_CONNECT_TIME reports -1 for a socket that is not connected.
      ec = asio::error::not_connected;
      return socket_error_retval;
    }

    // The cached value is still valid.
    ec = asio::error_code();
    return 0;
  }
#else // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)
      // || defined(__CYGWIN__)
  (void)cached;
#endif // defined(ASIO_WINDOWS) && !defined(ASIO_WINDOWS_APP)
       // || defined(__CYGWIN__)

  clear_last_error();
  int result = error_wrapper(call_getpeername(
        &msghdr::msg_namelen, s, addr, addrlen), ec);
  if (result == 0)
    ec = asio::error_code();
  return result;
}

// Adapter that invokes ::getsockname() with the platform's length type.
template <typename SockLenType>
inline int call_getsockname(SockLenType msghdr::*,
    socket_type s, socket_addr_type* addr, std::size_t* addrlen)
{
  SockLenType tmp_addrlen = (SockLenType)*addrlen;
  int result = ::getsockname(s, addr, &tmp_addrlen);
  *addrlen = (std::size_t)tmp_addrlen;
  return result;
}

// Get the local endpoint to which the socket is bound.
int getsockname(socket_type s, socket_addr_type* addr,
    std::size_t* addrlen, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  clear_last_error();
  int result = error_wrapper(call_getsockname(
        &msghdr::msg_namelen, s, addr, addrlen), ec);
  if (result == 0)
    ec = asio::error_code();
  return result;
}

// Perform a control operation on the socket (e.g. FIONBIO, FIONREAD),
// updating the cached non-blocking state flags on success.
int ioctl(socket_type s, state_type& state, int cmd,
    ioctl_arg_type* arg, asio::error_code& ec)
{
  if (s == invalid_socket)
  {
    ec = asio::error::bad_descriptor;
    return socket_error_retval;
  }

  clear_last_error();
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
  int result = error_wrapper(::ioctlsocket(s, cmd, arg), ec);
#elif defined(__MACH__) && defined(__APPLE__) \
  || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
  int result = error_wrapper(::ioctl(s,
        static_cast<unsigned int>(cmd), arg), ec);
#else
  int result = error_wrapper(::ioctl(s, cmd, arg), ec);
#endif
  if (result >= 0)
  {
    ec = asio::error_code();

    // When updating the non-blocking mode we always perform the ioctl syscall,
    // even if the flags would otherwise indicate that the socket is already in
    // the correct state. This ensures that the underlying socket is put into
    // the state that has been requested by the user.
If the ioctl syscall was // successful then we need to update the flags to match. if (cmd == static_cast(FIONBIO)) { if (*arg) { state |= user_set_non_blocking; } else { // Clearing the non-blocking mode always overrides any internally-set // non-blocking flag. Any subsequent asynchronous operations will need // to re-enable non-blocking I/O. state &= ~(user_set_non_blocking | internal_non_blocking); } } } return result; } int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) if (!readfds && !writefds && !exceptfds && timeout) { DWORD milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000; if (milliseconds == 0) milliseconds = 1; // Force context switch. ::Sleep(milliseconds); ec = asio::error_code(); return 0; } // The select() call allows timeout values measured in microseconds, but the // system clock (as wrapped by boost::posix_time::microsec_clock) typically // has a resolution of 10 milliseconds. This can lead to a spinning select // reactor, meaning increased CPU usage, when waiting for the earliest // scheduled timeout if it's less than 10 milliseconds away. To avoid a tight // spin we'll use a minimum timeout of 1 millisecond. if (timeout && timeout->tv_sec == 0 && timeout->tv_usec > 0 && timeout->tv_usec < 1000) timeout->tv_usec = 1000; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if defined(__hpux) && defined(__SELECT) timespec ts; ts.tv_sec = timeout ? timeout->tv_sec : 0; ts.tv_nsec = timeout ? timeout->tv_usec * 1000 : 0; return error_wrapper(::pselect(nfds, readfds, writefds, exceptfds, timeout ? 
&ts : 0, 0), ec); #else int result = error_wrapper(::select(nfds, readfds, writefds, exceptfds, timeout), ec); if (result >= 0) ec = asio::error_code(); return result; #endif } int poll_read(socket_type s, state_type state, int msec, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval timeout_obj; timeval* timeout; if (state & user_set_non_blocking) { timeout_obj.tv_sec = 0; timeout_obj.tv_usec = 0; timeout = &timeout_obj; } else if (msec >= 0) { timeout_obj.tv_sec = msec / 1000; timeout_obj.tv_usec = (msec % 1000) * 1000; timeout = &timeout_obj; } else timeout = 0; clear_last_error(); int result = error_wrapper(::select(s + 1, &fds, 0, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLIN; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : msec; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_write(socket_type s, state_type state, int msec, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval timeout_obj; timeval* timeout; if (state & user_set_non_blocking) { timeout_obj.tv_sec = 0; timeout_obj.tv_usec = 0; timeout = &timeout_obj; } else if (msec >= 0) { timeout_obj.tv_sec = msec / 1000; timeout_obj.tv_usec = (msec % 1000) * 1000; timeout = &timeout_obj; } else timeout = 0; clear_last_error(); int result = error_wrapper(::select(s + 1, 0, &fds, 0, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : msec; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_error(socket_type s, state_type state, int msec, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set fds; FD_ZERO(&fds); FD_SET(s, &fds); timeval timeout_obj; timeval* timeout; if (state & user_set_non_blocking) { timeout_obj.tv_sec = 0; timeout_obj.tv_usec = 0; timeout = &timeout_obj; } else if (msec >= 0) { timeout_obj.tv_sec = msec / 1000; timeout_obj.tv_usec = (msec % 1000) * 1000; timeout = &timeout_obj; } else timeout = 0; clear_last_error(); int result = error_wrapper(::select(s + 1, 0, 0, &fds, timeout), ec); #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLPRI | POLLERR | POLLHUP; fds.revents = 0; int timeout = (state & user_set_non_blocking) ? 0 : msec; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, timeout), ec); #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) if (result == 0) ec = (state & user_set_non_blocking) ? 
asio::error::would_block : asio::error_code(); else if (result > 0) ec = asio::error_code(); return result; } int poll_connect(socket_type s, int msec, asio::error_code& ec) { if (s == invalid_socket) { ec = asio::error::bad_descriptor; return socket_error_retval; } #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) fd_set write_fds; FD_ZERO(&write_fds); FD_SET(s, &write_fds); fd_set except_fds; FD_ZERO(&except_fds); FD_SET(s, &except_fds); timeval timeout_obj; timeval* timeout; if (msec >= 0) { timeout_obj.tv_sec = msec / 1000; timeout_obj.tv_usec = (msec % 1000) * 1000; timeout = &timeout_obj; } else timeout = 0; clear_last_error(); int result = error_wrapper(::select( s + 1, 0, &write_fds, &except_fds, timeout), ec); if (result >= 0) ec = asio::error_code(); return result; #else // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) pollfd fds; fds.fd = s; fds.events = POLLOUT; fds.revents = 0; clear_last_error(); int result = error_wrapper(::poll(&fds, 1, msec), ec); if (result >= 0) ec = asio::error_code(); return result; #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) } #endif // !defined(ASIO_WINDOWS_RUNTIME) const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sprintf. const unsigned char* bytes = static_cast(src); if (af == ASIO_OS_DEF(AF_INET)) { sprintf_s(dest, length, "%u.%u.%u.%u", bytes[0], bytes[1], bytes[2], bytes[3]); return dest; } else if (af == ASIO_OS_DEF(AF_INET6)) { size_t n = 0, b = 0, z = 0; while (n < length && b < 16) { if (bytes[b] == 0 && bytes[b + 1] == 0 && z == 0) { do b += 2; while (b < 16 && bytes[b] == 0 && bytes[b + 1] == 0); n += sprintf_s(dest + n, length - n, ":%s", b < 16 ? "" : ":"), ++z; } else { n += sprintf_s(dest + n, length - n, "%s%x", b ? 
":" : "", (static_cast(bytes[b]) << 8) | bytes[b + 1]); b += 2; } } if (scope_id) n += sprintf_s(dest + n, length - n, "%%%lu", scope_id); return dest; } else { ec = asio::error::address_family_not_supported; return 0; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy. if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return 0; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; DWORD address_length; if (af == ASIO_OS_DEF(AF_INET)) { address_length = sizeof(sockaddr_in4_type); address.v4.sin_family = ASIO_OS_DEF(AF_INET); address.v4.sin_port = 0; memcpy(&address.v4.sin_addr, src, sizeof(in4_addr_type)); } else // AF_INET6 { address_length = sizeof(sockaddr_in6_type); address.v6.sin6_family = ASIO_OS_DEF(AF_INET6); address.v6.sin6_port = 0; address.v6.sin6_flowinfo = 0; address.v6.sin6_scope_id = scope_id; memcpy(&address.v6.sin6_addr, src, sizeof(in6_addr_type)); } DWORD string_length = static_cast(length); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) LPWSTR string_buffer = (LPWSTR)_alloca(length * sizeof(WCHAR)); int result = error_wrapper(::WSAAddressToStringW(&address.base, address_length, 0, string_buffer, &string_length), ec); ::WideCharToMultiByte(CP_ACP, 0, string_buffer, -1, dest, static_cast(length), 0, 0); #else int result = error_wrapper(::WSAAddressToStringA( &address.base, address_length, 0, dest, &string_length), ec); #endif // Windows may set error code on success. if (result != socket_error_retval) ec = asio::error_code(); // Windows may not set an error code on failure. else if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; return result == socket_error_retval ? 
0 : dest; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) const char* result = error_wrapper(::inet_ntop( af, src, dest, static_cast(length)), ec); if (result == 0 && !ec) ec = asio::error::invalid_argument; if (result != 0 && af == ASIO_OS_DEF(AF_INET6) && scope_id != 0) { using namespace std; // For strcat and sprintf. char if_name[(IF_NAMESIZE > 21 ? IF_NAMESIZE : 21) + 1] = "%"; const in6_addr_type* ipv6_address = static_cast(src); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if ((!is_link_local && !is_multicast_link_local) || if_indextoname(static_cast(scope_id), if_name + 1) == 0) sprintf(if_name + 1, "%lu", scope_id); strcat(dest, if_name); } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) using namespace std; // For sscanf. 
unsigned char* bytes = static_cast(dest); if (af == ASIO_OS_DEF(AF_INET)) { unsigned int b0, b1, b2, b3; if (sscanf_s(src, "%u.%u.%u.%u", &b0, &b1, &b2, &b3) != 4) { ec = asio::error::invalid_argument; return -1; } if (b0 > 255 || b1 > 255 || b2 > 255 || b3 > 255) { ec = asio::error::invalid_argument; return -1; } bytes[0] = static_cast(b0); bytes[1] = static_cast(b1); bytes[2] = static_cast(b2); bytes[3] = static_cast(b3); ec = asio::error_code(); return 1; } else if (af == ASIO_OS_DEF(AF_INET6)) { unsigned char* bytes = static_cast(dest); std::memset(bytes, 0, 16); unsigned char back_bytes[16] = { 0 }; int num_front_bytes = 0, num_back_bytes = 0; const char* p = src; enum { fword, fcolon, bword, scope, done } state = fword; unsigned long current_word = 0; while (state != done) { if (current_word > 0xFFFF) { ec = asio::error::invalid_argument; return -1; } switch (state) { case fword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes == 16) { ec = asio::error::invalid_argument; return -1; } bytes[num_front_bytes++] = (current_word >> 8) & 0xFF; bytes[num_front_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = fcolon, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case fcolon: if (*p == ':') state = bword, ++p; else state = fword; break; case bword: if (*p >= '0' && *p <= '9') current_word = current_word * 16 + *p++ - '0'; else if (*p >= 'a' && *p <= 'f') current_word = current_word * 16 + *p++ - 'a' + 10; else if (*p >= 'A' && *p <= 'F') current_word = current_word * 16 + *p++ - 'A' + 10; else { if (num_front_bytes + num_back_bytes == 16) { ec = asio::error::invalid_argument; return -1; } back_bytes[num_back_bytes++] = 
(current_word >> 8) & 0xFF; back_bytes[num_back_bytes++] = current_word & 0xFF; current_word = 0; if (*p == ':') state = bword, ++p; else if (*p == '%') state = scope, ++p; else if (*p == 0) state = done; else { ec = asio::error::invalid_argument; return -1; } } break; case scope: if (*p >= '0' && *p <= '9') current_word = current_word * 10 + *p++ - '0'; else if (*p == 0) *scope_id = current_word, state = done; else { ec = asio::error::invalid_argument; return -1; } break; default: break; } } for (int i = 0; i < num_back_bytes; ++i) bytes[16 - num_back_bytes + i] = back_bytes[i]; ec = asio::error_code(); return 1; } else { ec = asio::error::address_family_not_supported; return -1; } #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For memcpy and strcmp. if (af != ASIO_OS_DEF(AF_INET) && af != ASIO_OS_DEF(AF_INET6)) { ec = asio::error::address_family_not_supported; return -1; } union { socket_addr_type base; sockaddr_storage_type storage; sockaddr_in4_type v4; sockaddr_in6_type v6; } address; int address_length = sizeof(sockaddr_storage_type); #if defined(BOOST_NO_ANSI_APIS) || (defined(_MSC_VER) && (_MSC_VER >= 1800)) int num_wide_chars = static_cast(strlen(src)) + 1; LPWSTR wide_buffer = (LPWSTR)_alloca(num_wide_chars * sizeof(WCHAR)); ::MultiByteToWideChar(CP_ACP, 0, src, -1, wide_buffer, num_wide_chars); int result = error_wrapper(::WSAStringToAddressW( wide_buffer, af, 0, &address.base, &address_length), ec); #else int result = error_wrapper(::WSAStringToAddressA( const_cast(src), af, 0, &address.base, &address_length), ec); #endif if (af == ASIO_OS_DEF(AF_INET)) { if (result != socket_error_retval) { memcpy(dest, &address.v4.sin_addr, sizeof(in4_addr_type)); ec = asio::error_code(); } else if (strcmp(src, "255.255.255.255") == 0) { static_cast(dest)->s_addr = INADDR_NONE; ec = asio::error_code(); } } else // AF_INET6 { if (result != socket_error_retval) { memcpy(dest, &address.v6.sin6_addr, sizeof(in6_addr_type)); if (scope_id) 
*scope_id = address.v6.sin6_scope_id; ec = asio::error_code(); } } // Windows may not set an error code on failure. if (result == socket_error_retval && !ec) ec = asio::error::invalid_argument; if (result != socket_error_retval) ec = asio::error_code(); return result == socket_error_retval ? -1 : 1; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) using namespace std; // For strchr, memcpy and atoi. // On some platforms, inet_pton fails if an address string contains a scope // id. Detect and remove the scope id before passing the string to inet_pton. const bool is_v6 = (af == ASIO_OS_DEF(AF_INET6)); const char* if_name = is_v6 ? strchr(src, '%') : 0; char src_buf[max_addr_v6_str_len + 1]; const char* src_ptr = src; if (if_name != 0) { if (if_name - src > max_addr_v6_str_len) { ec = asio::error::invalid_argument; return 0; } memcpy(src_buf, src, if_name - src); src_buf[if_name - src] = 0; src_ptr = src_buf; } int result = error_wrapper(::inet_pton(af, src_ptr, dest), ec); if (result <= 0 && !ec) ec = asio::error::invalid_argument; if (result > 0 && is_v6 && scope_id) { using namespace std; // For strchr and atoi. 
*scope_id = 0; if (if_name != 0) { in6_addr_type* ipv6_address = static_cast(dest); bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe) && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80)); bool is_multicast_link_local = ((ipv6_address->s6_addr[0] == 0xff) && ((ipv6_address->s6_addr[1] & 0x0f) == 0x02)); if (is_link_local || is_multicast_link_local) *scope_id = if_nametoindex(if_name + 1); if (*scope_id == 0) *scope_id = atoi(if_name + 1); } } return result; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } int gethostname(char* name, int namelen, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS_RUNTIME) try { using namespace Windows::Foundation::Collections; using namespace Windows::Networking; using namespace Windows::Networking::Connectivity; IVectorView^ hostnames = NetworkInformation::GetHostNames(); for (unsigned i = 0; i < hostnames->Size; ++i) { HostName^ hostname = hostnames->GetAt(i); if (hostname->Type == HostNameType::DomainName) { std::wstring_convert> converter; std::string raw_name = converter.to_bytes(hostname->RawName->Data()); if (namelen > 0 && raw_name.size() < static_cast(namelen)) { strcpy_s(name, namelen, raw_name.c_str()); return 0; } } } return -1; } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return -1; } #else // defined(ASIO_WINDOWS_RUNTIME) int result = error_wrapper(::gethostname(name, namelen), ec); # if defined(ASIO_WINDOWS) if (result == 0) ec = asio::error_code(); # endif // defined(ASIO_WINDOWS) return result; #endif // defined(ASIO_WINDOWS_RUNTIME) } #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_HAS_GETADDRINFO) // The following functions are only needed for emulation of getaddrinfo and // getnameinfo. 
inline asio::error_code translate_netdb_error(int error) { switch (error) { case 0: return asio::error_code(); case HOST_NOT_FOUND: return asio::error::host_not_found; case TRY_AGAIN: return asio::error::host_not_found_try_again; case NO_RECOVERY: return asio::error::no_recovery; case NO_DATA: return asio::error::no_data; default: ASIO_ASSERT(false); return asio::error::invalid_argument; } } inline hostent* gethostbyaddr(const char* addr, int length, int af, hostent* result, char* buffer, int buflength, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); hostent* retval = error_wrapper(::gethostbyaddr(addr, length, af), ec); if (!retval) return 0; ec = asio::error_code(); *result = *retval; return retval; #elif defined(__sun) || defined(__QNX__) int error = 0; hostent* retval = error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyaddr( addr, length, af, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else hostent* retval = 0; int error = 0; error_wrapper(::gethostbyaddr_r(addr, length, af, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline hostent* gethostbyname(const char* name, int af, struct hostent* result, char* buffer, int buflength, int ai_flags, asio::error_code& ec) { clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) (void)(buffer); (void)(buflength); (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = error_wrapper(::gethostbyname(name), ec); if (!retval) return 0; ec = asio::error_code(); *result = 
*retval; return result; #elif defined(__sun) || defined(__QNX__) (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } int error = 0; hostent* retval = error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #elif defined(__MACH__) && defined(__APPLE__) (void)(buffer); (void)(buflength); int error = 0; hostent* retval = error_wrapper(::getipnodebyname( name, af, ai_flags, &error), ec); if (error) ec = translate_netdb_error(error); if (!retval) return 0; *result = *retval; return retval; #else (void)(ai_flags); if (af != ASIO_OS_DEF(AF_INET)) { ec = asio::error::address_family_not_supported; return 0; } hostent* retval = 0; int error = 0; error_wrapper(::gethostbyname_r(name, result, buffer, buflength, &retval, &error), ec); if (error) ec = translate_netdb_error(error); return retval; #endif } inline void freehostent(hostent* h) { #if defined(__MACH__) && defined(__APPLE__) if (h) ::freehostent(h); #else (void)(h); #endif } // Emulation of getaddrinfo based on implementation in: // Stevens, W. R., UNIX Network Programming Vol. 1, 2nd Ed., Prentice-Hall 1998. struct gai_search { const char* host; int family; }; inline int gai_nsearch(const char* host, const addrinfo_type* hints, gai_search (&search)[2]) { int search_count = 0; if (host == 0 || host[0] == '\0') { if (hints->ai_flags & AI_PASSIVE) { // No host and AI_PASSIVE implies wildcard bind. 
switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "0::0"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "0.0.0.0"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } else { // No host and not AI_PASSIVE means connect to local host. switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = "localhost"; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } } else { // Host is specified. 
switch (hints->ai_family) { case ASIO_OS_DEF(AF_INET): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; case ASIO_OS_DEF(AF_INET6): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; break; case ASIO_OS_DEF(AF_UNSPEC): search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET6); ++search_count; search[search_count].host = host; search[search_count].family = ASIO_OS_DEF(AF_INET); ++search_count; break; default: break; } } return search_count; } template inline T* gai_alloc(std::size_t size = sizeof(T)) { using namespace std; T* p = static_cast(::operator new(size, std::nothrow)); if (p) memset(p, 0, size); return p; } inline void gai_free(void* p) { ::operator delete(p); } inline void gai_strcpy(char* target, const char* source, std::size_t max_size) { using namespace std; #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(target, max_size, source); #else // defined(ASIO_HAS_SECURE_RTL) *target = 0; if (max_size > 0) strncat(target, source, max_size - 1); #endif // defined(ASIO_HAS_SECURE_RTL) } enum { gai_clone_flag = 1 << 30 }; inline int gai_aistruct(addrinfo_type*** next, const addrinfo_type* hints, const void* addr, int family) { using namespace std; addrinfo_type* ai = gai_alloc(); if (ai == 0) return EAI_MEMORY; ai->ai_next = 0; **next = ai; *next = &ai->ai_next; ai->ai_canonname = 0; ai->ai_socktype = hints->ai_socktype; if (ai->ai_socktype == 0) ai->ai_flags |= gai_clone_flag; ai->ai_protocol = hints->ai_protocol; ai->ai_family = family; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = gai_alloc(); if (sinptr == 0) return EAI_MEMORY; sinptr->sin_family = ASIO_OS_DEF(AF_INET); memcpy(&sinptr->sin_addr, addr, sizeof(in4_addr_type)); ai->ai_addr = reinterpret_cast(sinptr); ai->ai_addrlen = sizeof(sockaddr_in4_type); break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = gai_alloc(); if 
(sin6ptr == 0) return EAI_MEMORY; sin6ptr->sin6_family = ASIO_OS_DEF(AF_INET6); memcpy(&sin6ptr->sin6_addr, addr, sizeof(in6_addr_type)); ai->ai_addr = reinterpret_cast(sin6ptr); ai->ai_addrlen = sizeof(sockaddr_in6_type); break; } default: break; } return 0; } inline addrinfo_type* gai_clone(addrinfo_type* ai) { using namespace std; addrinfo_type* new_ai = gai_alloc(); if (new_ai == 0) return new_ai; new_ai->ai_next = ai->ai_next; ai->ai_next = new_ai; new_ai->ai_flags = 0; new_ai->ai_family = ai->ai_family; new_ai->ai_socktype = ai->ai_socktype; new_ai->ai_protocol = ai->ai_protocol; new_ai->ai_canonname = 0; new_ai->ai_addrlen = ai->ai_addrlen; new_ai->ai_addr = gai_alloc(ai->ai_addrlen); memcpy(new_ai->ai_addr, ai->ai_addr, ai->ai_addrlen); return new_ai; } inline int gai_port(addrinfo_type* aihead, int port, int socktype) { int num_found = 0; for (addrinfo_type* ai = aihead; ai; ai = ai->ai_next) { if (ai->ai_flags & gai_clone_flag) { if (ai->ai_socktype != 0) { ai = gai_clone(ai); if (ai == 0) return -1; // ai now points to newly cloned entry. } } else if (ai->ai_socktype != socktype) { // Ignore if mismatch on socket type. continue; } ai->ai_socktype = socktype; switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = reinterpret_cast(ai->ai_addr); sinptr->sin_port = port; ++num_found; break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = reinterpret_cast(ai->ai_addr); sin6ptr->sin6_port = port; ++num_found; break; } default: break; } } return num_found; } inline int gai_serv(addrinfo_type* aihead, const addrinfo_type* hints, const char* serv) { using namespace std; int num_found = 0; if ( #if defined(AI_NUMERICSERV) (hints->ai_flags & AI_NUMERICSERV) || #endif isdigit(static_cast(serv[0]))) { int port = htons(atoi(serv)); if (hints->ai_socktype) { // Caller specifies socket type. 
int rc = gai_port(aihead, port, hints->ai_socktype); if (rc < 0) return EAI_MEMORY; num_found += rc; } else { // Caller does not specify socket type. int rc = gai_port(aihead, port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; rc = gai_port(aihead, port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } else { // Try service name with TCP first, then UDP. if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_STREAM) { servent* sptr = getservbyname(serv, "tcp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_STREAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } if (hints->ai_socktype == 0 || hints->ai_socktype == SOCK_DGRAM) { servent* sptr = getservbyname(serv, "udp"); if (sptr != 0) { int rc = gai_port(aihead, sptr->s_port, SOCK_DGRAM); if (rc < 0) return EAI_MEMORY; num_found += rc; } } } if (num_found == 0) { if (hints->ai_socktype == 0) { // All calls to getservbyname() failed. return EAI_NONAME; } else { // Service not supported for socket type. return EAI_SERVICE; } } return 0; } inline int gai_echeck(const char* host, const char* service, int flags, int family, int socktype, int protocol) { (void)(flags); (void)(protocol); // Host or service must be specified. if (host == 0 || host[0] == '\0') if (service == 0 || service[0] == '\0') return EAI_NONAME; // Check combination of family and socket type. 
switch (family) { case ASIO_OS_DEF(AF_UNSPEC): break; case ASIO_OS_DEF(AF_INET): case ASIO_OS_DEF(AF_INET6): if (service != 0 && service[0] != '\0') if (socktype != 0 && socktype != SOCK_STREAM && socktype != SOCK_DGRAM) return EAI_SOCKTYPE; break; default: return EAI_FAMILY; } return 0; } inline void freeaddrinfo_emulation(addrinfo_type* aihead) { addrinfo_type* ai = aihead; while (ai) { gai_free(ai->ai_addr); gai_free(ai->ai_canonname); addrinfo_type* ainext = ai->ai_next; gai_free(ai); ai = ainext; } } inline int getaddrinfo_emulation(const char* host, const char* service, const addrinfo_type* hintsp, addrinfo_type** result) { // Set up linked list of addrinfo structures. addrinfo_type* aihead = 0; addrinfo_type** ainext = &aihead; char* canon = 0; // Supply default hints if not specified by caller. addrinfo_type hints = addrinfo_type(); hints.ai_family = ASIO_OS_DEF(AF_UNSPEC); if (hintsp) hints = *hintsp; // If the resolution is not specifically for AF_INET6, remove the AI_V4MAPPED // and AI_ALL flags. #if defined(AI_V4MAPPED) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_V4MAPPED; #endif #if defined(AI_ALL) if (hints.ai_family != ASIO_OS_DEF(AF_INET6)) hints.ai_flags &= ~AI_ALL; #endif // Basic error checking. int rc = gai_echeck(host, service, hints.ai_flags, hints.ai_family, hints.ai_socktype, hints.ai_protocol); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } gai_search search[2]; int search_count = gai_nsearch(host, &hints, search); for (gai_search* sptr = search; sptr < search + search_count; ++sptr) { // Check for IPv4 dotted decimal string. 
in4_addr_type inaddr; asio::error_code ec; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), sptr->host, &inaddr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET)) { rc = gai_aistruct(&ainext, &hints, &inaddr, ASIO_OS_DEF(AF_INET)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Check for IPv6 hex string. in6_addr_type in6addr; if (socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), sptr->host, &in6addr, 0, ec) == 1) { if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != ASIO_OS_DEF(AF_INET6)) { freeaddrinfo_emulation(aihead); gai_free(canon); return EAI_FAMILY; } if (sptr->family == ASIO_OS_DEF(AF_INET6)) { rc = gai_aistruct(&ainext, &hints, &in6addr, ASIO_OS_DEF(AF_INET6)); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); return rc; } } continue; } // Look up hostname. hostent hent; char hbuf[8192] = ""; hostent* hptr = socket_ops::gethostbyname(sptr->host, sptr->family, &hent, hbuf, sizeof(hbuf), hints.ai_flags, ec); if (hptr == 0) { if (search_count == 2) { // Failure is OK if there are multiple searches. continue; } freeaddrinfo_emulation(aihead); gai_free(canon); if (ec == asio::error::host_not_found) return EAI_NONAME; if (ec == asio::error::host_not_found_try_again) return EAI_AGAIN; if (ec == asio::error::no_recovery) return EAI_FAIL; if (ec == asio::error::no_data) return EAI_NONAME; return EAI_NONAME; } // Check for address family mismatch if one was specified. if (hints.ai_family != ASIO_OS_DEF(AF_UNSPEC) && hints.ai_family != hptr->h_addrtype) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } // Save canonical name first time. 
if (host != 0 && host[0] != '\0' && hptr->h_name && hptr->h_name[0] && (hints.ai_flags & AI_CANONNAME) && canon == 0) { std::size_t canon_len = strlen(hptr->h_name) + 1; canon = gai_alloc(canon_len); if (canon == 0) { freeaddrinfo_emulation(aihead); socket_ops::freehostent(hptr); return EAI_MEMORY; } gai_strcpy(canon, hptr->h_name, canon_len); } // Create an addrinfo structure for each returned address. for (char** ap = hptr->h_addr_list; *ap; ++ap) { rc = gai_aistruct(&ainext, &hints, *ap, hptr->h_addrtype); if (rc != 0) { freeaddrinfo_emulation(aihead); gai_free(canon); socket_ops::freehostent(hptr); return EAI_FAMILY; } } socket_ops::freehostent(hptr); } // Check if we found anything. if (aihead == 0) { gai_free(canon); return EAI_NONAME; } // Return canonical name in first entry. if (host != 0 && host[0] != '\0' && (hints.ai_flags & AI_CANONNAME)) { if (canon) { aihead->ai_canonname = canon; canon = 0; } else { std::size_t canonname_len = strlen(search[0].host) + 1; aihead->ai_canonname = gai_alloc(canonname_len); if (aihead->ai_canonname == 0) { freeaddrinfo_emulation(aihead); return EAI_MEMORY; } gai_strcpy(aihead->ai_canonname, search[0].host, canonname_len); } } gai_free(canon); // Process the service name. if (service != 0 && service[0] != '\0') { rc = gai_serv(aihead, &hints, service); if (rc != 0) { freeaddrinfo_emulation(aihead); return rc; } } // Return result to caller. 
*result = aihead; return 0; } inline asio::error_code getnameinfo_emulation( const socket_addr_type* sa, std::size_t salen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec) { using namespace std; const char* addr; size_t addr_len; unsigned short port; switch (sa->sa_family) { case ASIO_OS_DEF(AF_INET): if (salen != sizeof(sockaddr_in4_type)) { return ec = asio::error::invalid_argument; } addr = reinterpret_cast( &reinterpret_cast(sa)->sin_addr); addr_len = sizeof(in4_addr_type); port = reinterpret_cast(sa)->sin_port; break; case ASIO_OS_DEF(AF_INET6): if (salen != sizeof(sockaddr_in6_type)) { return ec = asio::error::invalid_argument; } addr = reinterpret_cast( &reinterpret_cast(sa)->sin6_addr); addr_len = sizeof(in6_addr_type); port = reinterpret_cast(sa)->sin6_port; break; default: return ec = asio::error::address_family_not_supported; } if (host && hostlen > 0) { if (flags & NI_NUMERICHOST) { if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0) { return ec; } } else { hostent hent; char hbuf[8192] = ""; hostent* hptr = socket_ops::gethostbyaddr(addr, static_cast(addr_len), sa->sa_family, &hent, hbuf, sizeof(hbuf), ec); if (hptr && hptr->h_name && hptr->h_name[0] != '\0') { if (flags & NI_NOFQDN) { char* dot = strchr(hptr->h_name, '.'); if (dot) { *dot = 0; } } gai_strcpy(host, hptr->h_name, hostlen); socket_ops::freehostent(hptr); } else { socket_ops::freehostent(hptr); if (flags & NI_NAMEREQD) { return ec = asio::error::host_not_found; } if (socket_ops::inet_ntop(sa->sa_family, addr, host, hostlen, 0, ec) == 0) { return ec; } } } } if (serv && servlen > 0) { if (flags & NI_NUMERICSERV) { if (servlen < 6) { return ec = asio::error::no_buffer_space; } #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(serv, servlen, "%u", ntohs(port)); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(serv, "%u", ntohs(port)); #endif // defined(ASIO_HAS_SECURE_RTL) } else { #if defined(ASIO_HAS_PTHREADS) static 
::pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; ::pthread_mutex_lock(&mutex); #endif // defined(ASIO_HAS_PTHREADS) servent* sptr = ::getservbyport(port, (flags & NI_DGRAM) ? "udp" : 0); if (sptr && sptr->s_name && sptr->s_name[0] != '\0') { gai_strcpy(serv, sptr->s_name, servlen); } else { if (servlen < 6) { return ec = asio::error::no_buffer_space; } #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(serv, servlen, "%u", ntohs(port)); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(serv, "%u", ntohs(port)); #endif // defined(ASIO_HAS_SECURE_RTL) } #if defined(ASIO_HAS_PTHREADS) ::pthread_mutex_unlock(&mutex); #endif // defined(ASIO_HAS_PTHREADS) } } ec = asio::error_code(); return ec; } #endif // !defined(ASIO_HAS_GETADDRINFO) inline asio::error_code translate_addrinfo_error(int error) { switch (error) { case 0: return asio::error_code(); case EAI_AGAIN: return asio::error::host_not_found_try_again; case EAI_BADFLAGS: return asio::error::invalid_argument; case EAI_FAIL: return asio::error::no_recovery; case EAI_FAMILY: return asio::error::address_family_not_supported; case EAI_MEMORY: return asio::error::no_memory; case EAI_NONAME: #if defined(EAI_ADDRFAMILY) case EAI_ADDRFAMILY: #endif #if defined(EAI_NODATA) && (EAI_NODATA != EAI_NONAME) case EAI_NODATA: #endif return asio::error::host_not_found; case EAI_SERVICE: return asio::error::service_not_found; case EAI_SOCKTYPE: return asio::error::socket_type_not_supported; default: // Possibly the non-portable EAI_SYSTEM. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) return asio::error_code( WSAGetLastError(), asio::error::get_system_category()); #else return asio::error_code( errno, asio::error::get_system_category()); #endif } } asio::error_code getaddrinfo(const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec) { host = (host && *host) ? host : 0; service = (service && *service) ? 
service : 0; clear_last_error(); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. int error = ::getaddrinfo(host, service, &hints, result); return ec = translate_addrinfo_error(error); # else // Building for Windows 2000 or earlier. typedef int (WSAAPI *gai_t)(const char*, const char*, const addrinfo_type*, addrinfo_type**); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (gai_t gai = (gai_t)::GetProcAddress(winsock_module, "getaddrinfo")) { int error = gai(host, service, &hints, result); return ec = translate_addrinfo_error(error); } } int error = getaddrinfo_emulation(host, service, &hints, result); return ec = translate_addrinfo_error(error); # endif #elif !defined(ASIO_HAS_GETADDRINFO) int error = getaddrinfo_emulation(host, service, &hints, result); return ec = translate_addrinfo_error(error); #else int error = ::getaddrinfo(host, service, &hints, result); #if defined(__MACH__) && defined(__APPLE__) using namespace std; // For isdigit and atoi. 
if (error == 0 && service && isdigit(static_cast(service[0]))) { u_short_type port = host_to_network_short(atoi(service)); for (addrinfo_type* ai = *result; ai; ai = ai->ai_next) { switch (ai->ai_family) { case ASIO_OS_DEF(AF_INET): { sockaddr_in4_type* sinptr = reinterpret_cast(ai->ai_addr); if (sinptr->sin_port == 0) sinptr->sin_port = port; break; } case ASIO_OS_DEF(AF_INET6): { sockaddr_in6_type* sin6ptr = reinterpret_cast(ai->ai_addr); if (sin6ptr->sin6_port == 0) sin6ptr->sin6_port = port; break; } default: break; } } } #endif return ec = translate_addrinfo_error(error); #endif } asio::error_code background_getaddrinfo( const weak_cancel_token_type& cancel_token, const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec) { if (cancel_token.expired()) ec = asio::error::operation_aborted; else socket_ops::getaddrinfo(host, service, hints, result, ec); return ec; } void freeaddrinfo(addrinfo_type* ai) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. ::freeaddrinfo(ai); # else // Building for Windows 2000 or earlier. typedef int (WSAAPI *fai_t)(addrinfo_type*); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (fai_t fai = (fai_t)::GetProcAddress(winsock_module, "freeaddrinfo")) { fai(ai); return; } } freeaddrinfo_emulation(ai); # endif #elif !defined(ASIO_HAS_GETADDRINFO) freeaddrinfo_emulation(ai); #else ::freeaddrinfo(ai); #endif } asio::error_code getnameinfo(const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(ASIO_HAS_GETADDRINFO) // Building for Windows XP, Windows Server 2003, or later. 
clear_last_error(); int error = ::getnameinfo(addr, static_cast(addrlen), host, static_cast(hostlen), serv, static_cast(servlen), flags); return ec = translate_addrinfo_error(error); # else // Building for Windows 2000 or earlier. typedef int (WSAAPI *gni_t)(const socket_addr_type*, int, char*, DWORD, char*, DWORD, int); if (HMODULE winsock_module = ::GetModuleHandleA("ws2_32")) { if (gni_t gni = (gni_t)::GetProcAddress(winsock_module, "getnameinfo")) { clear_last_error(); int error = gni(addr, static_cast(addrlen), host, static_cast(hostlen), serv, static_cast(servlen), flags); return ec = translate_addrinfo_error(error); } } clear_last_error(); return getnameinfo_emulation(addr, addrlen, host, hostlen, serv, servlen, flags, ec); # endif #elif !defined(ASIO_HAS_GETADDRINFO) using namespace std; // For memcpy. sockaddr_storage_type tmp_addr; memcpy(&tmp_addr, addr, addrlen); addr = reinterpret_cast(&tmp_addr); clear_last_error(); return getnameinfo_emulation(addr, addrlen, host, hostlen, serv, servlen, flags, ec); #else clear_last_error(); int error = ::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags); return ec = translate_addrinfo_error(error); #endif } asio::error_code sync_getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec) { // First try resolving with the service name. If that fails try resolving // but allow the service to be returned as a number. int flags = (sock_type == SOCK_DGRAM) ? 
NI_DGRAM : 0; socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags, ec); if (ec) { socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags | NI_NUMERICSERV, ec); } return ec; } asio::error_code background_getnameinfo( const weak_cancel_token_type& cancel_token, const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec) { if (cancel_token.expired()) { ec = asio::error::operation_aborted; } else { // First try resolving with the service name. If that fails try resolving // but allow the service to be returned as a number. int flags = (sock_type == SOCK_DGRAM) ? NI_DGRAM : 0; socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags, ec); if (ec) { socket_ops::getnameinfo(addr, addrlen, host, hostlen, serv, servlen, flags | NI_NUMERICSERV, ec); } } return ec; } #endif // !defined(ASIO_WINDOWS_RUNTIME) u_long_type network_to_host_long(u_long_type value) { #if defined(ASIO_WINDOWS_RUNTIME) unsigned char* value_p = reinterpret_cast(&value); u_long_type result = (static_cast(value_p[0]) << 24) | (static_cast(value_p[1]) << 16) | (static_cast(value_p[2]) << 8) | static_cast(value_p[3]); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return ntohl(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_long_type host_to_network_long(u_long_type value) { #if defined(ASIO_WINDOWS_RUNTIME) u_long_type result; unsigned char* result_p = reinterpret_cast(&result); result_p[0] = static_cast((value >> 24) & 0xFF); result_p[1] = static_cast((value >> 16) & 0xFF); result_p[2] = static_cast((value >> 8) & 0xFF); result_p[3] = static_cast(value & 0xFF); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return htonl(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_short_type network_to_host_short(u_short_type value) { #if defined(ASIO_WINDOWS_RUNTIME) unsigned char* value_p = reinterpret_cast(&value); u_short_type result = 
(static_cast(value_p[0]) << 8) | static_cast(value_p[1]); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return ntohs(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } u_short_type host_to_network_short(u_short_type value) { #if defined(ASIO_WINDOWS_RUNTIME) u_short_type result; unsigned char* result_p = reinterpret_cast(&result); result_p[0] = static_cast((value >> 8) & 0xFF); result_p[1] = static_cast(value & 0xFF); return result; #else // defined(ASIO_WINDOWS_RUNTIME) return htons(value); #endif // defined(ASIO_WINDOWS_RUNTIME) } } // namespace socket_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_OPS_IPP galera-4-26.4.25/asio/asio/detail/impl/epoll_reactor.ipp000644 000164 177776 00000052666 15107057155 024170 0ustar00jenkinsnogroup000000 000000 // // detail/impl/epoll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include #include #include "asio/detail/epoll_reactor.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #if defined(ASIO_HAS_TIMERFD) # include #endif // defined(ASIO_HAS_TIMERFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { epoll_reactor::epoll_reactor(asio::execution_context& ctx) : execution_context_service_base(ctx), scheduler_(use_service(ctx)), mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( REACTOR_REGISTRATION, scheduler_.concurrency_hint())), interrupter_(), epoll_fd_(do_epoll_create()), timer_fd_(do_timerfd_create()), shutdown_(false), registered_descriptors_mutex_(mutex_.enabled()) { // Add the interrupter's descriptor to epoll. epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. 
if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } } epoll_reactor::~epoll_reactor() { if (epoll_fd_ != -1) close(epoll_fd_); if (timer_fd_ != -1) close(timer_fd_); } void epoll_reactor::shutdown() { mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; while (descriptor_state* state = registered_descriptors_.first()) { for (int i = 0; i < max_ops; ++i) ops.push(state->op_queue_[i]); state->shutdown_ = true; registered_descriptors_.free(state); } timer_queues_.get_all_timers(ops); scheduler_.abandon_operations(ops); } void epoll_reactor::notify_fork( asio::execution_context::fork_event fork_ev) { if (fork_ev == asio::execution_context::fork_child) { if (epoll_fd_ != -1) ::close(epoll_fd_); epoll_fd_ = -1; epoll_fd_ = do_epoll_create(); if (timer_fd_ != -1) ::close(timer_fd_); timer_fd_ = -1; timer_fd_ = do_timerfd_create(); interrupter_.recreate(); // Add the interrupter's descriptor to epoll. epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev); interrupter_.interrupt(); // Add the timer descriptor to epoll. if (timer_fd_ != -1) { ev.events = EPOLLIN | EPOLLERR; ev.data.ptr = &timer_fd_; epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev); } update_timeout(); // Re-register all descriptors with epoll. 
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); for (descriptor_state* state = registered_descriptors_.first(); state != 0; state = state->next_) { ev.events = state->registered_events_; ev.data.ptr = state; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev); if (result != 0) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll re-registration"); } } } } void epoll_reactor::init_task() { scheduler_.init_task(); } int epoll_reactor::register_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { descriptor_data = allocate_descriptor_state(); ASIO_HANDLER_REACTOR_REGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; for (int i = 0; i < max_ops; ++i) descriptor_data->try_speculative_[i] = true; } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) { if (errno == EPERM) { // This file descriptor type is not supported by epoll. However, if it is // a regular file then operations on it will not block. We will allow // this descriptor to be used and fail later if an operation on it would // otherwise require a trip through the reactor. 
descriptor_data->registered_events_ = 0; return 0; } return errno; } return 0; } int epoll_reactor::register_internal_descriptor( int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op) { descriptor_data = allocate_descriptor_state(); ASIO_HANDLER_REACTOR_REGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); { mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); descriptor_data->reactor_ = this; descriptor_data->descriptor_ = descriptor; descriptor_data->shutdown_ = false; descriptor_data->op_queue_[op_type].push(op); for (int i = 0; i < max_ops; ++i) descriptor_data->try_speculative_[i] = true; } epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET; descriptor_data->registered_events_ = ev.events; ev.data.ptr = descriptor_data; int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev); if (result != 0) return errno; return 0; } void epoll_reactor::move_descriptor(socket_type, epoll_reactor::per_descriptor_data& target_descriptor_data, epoll_reactor::per_descriptor_data& source_descriptor_data) { target_descriptor_data = source_descriptor_data; source_descriptor_data = 0; } void epoll_reactor::start_op(int op_type, socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative) { if (!descriptor_data) { op->ec_ = asio::error::bad_descriptor; post_immediate_completion(op, is_continuation); return; } mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (descriptor_data->shutdown_) { post_immediate_completion(op, is_continuation); return; } if (descriptor_data->op_queue_[op_type].empty()) { if (allow_speculative && (op_type != read_op || descriptor_data->op_queue_[except_op].empty())) { if (descriptor_data->try_speculative_[op_type]) { if (reactor_op::status status = op->perform()) { if (status == reactor_op::done_and_exhausted) if 
(descriptor_data->registered_events_ != 0) descriptor_data->try_speculative_[op_type] = false; descriptor_lock.unlock(); scheduler_.post_immediate_completion(op, is_continuation); return; } } if (descriptor_data->registered_events_ == 0) { op->ec_ = asio::error::operation_not_supported; scheduler_.post_immediate_completion(op, is_continuation); return; } if (op_type == write_op) { if ((descriptor_data->registered_events_ & EPOLLOUT) == 0) { epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_ | EPOLLOUT; ev.data.ptr = descriptor_data; if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0) { descriptor_data->registered_events_ |= ev.events; } else { op->ec_ = asio::error_code(errno, asio::error::get_system_category()); scheduler_.post_immediate_completion(op, is_continuation); return; } } } } else if (descriptor_data->registered_events_ == 0) { op->ec_ = asio::error::operation_not_supported; scheduler_.post_immediate_completion(op, is_continuation); return; } else { if (op_type == write_op) { descriptor_data->registered_events_ |= EPOLLOUT; } epoll_event ev = { 0, { 0 } }; ev.events = descriptor_data->registered_events_; ev.data.ptr = descriptor_data; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev); } } descriptor_data->op_queue_[op_type].push(op); scheduler_.work_started(); } void epoll_reactor::cancel_ops(socket_type, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_lock.unlock(); scheduler_.post_deferred_completions(ops); } void epoll_reactor::deregister_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data, bool closing) { if (!descriptor_data) return; mutex::scoped_lock 
descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { if (closing) { // The descriptor will be automatically removed from the epoll set when // it is closed. } else if (descriptor_data->registered_events_ != 0) { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); } op_queue ops; for (int i = 0; i < max_ops; ++i) { while (reactor_op* op = descriptor_data->op_queue_[i].front()) { op->ec_ = asio::error::operation_aborted; descriptor_data->op_queue_[i].pop(); ops.push(op); } } descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); ASIO_HANDLER_REACTOR_DEREGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); scheduler_.post_deferred_completions(ops); // Leave descriptor_data set so that it will be freed by the subsequent // call to cleanup_descriptor_data. } else { // We are shutting down, so prevent cleanup_descriptor_data from freeing // the descriptor_data object and let the destructor free it instead. descriptor_data = 0; } } void epoll_reactor::deregister_internal_descriptor(socket_type descriptor, epoll_reactor::per_descriptor_data& descriptor_data) { if (!descriptor_data) return; mutex::scoped_lock descriptor_lock(descriptor_data->mutex_); if (!descriptor_data->shutdown_) { epoll_event ev = { 0, { 0 } }; epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev); op_queue ops; for (int i = 0; i < max_ops; ++i) ops.push(descriptor_data->op_queue_[i]); descriptor_data->descriptor_ = -1; descriptor_data->shutdown_ = true; descriptor_lock.unlock(); ASIO_HANDLER_REACTOR_DEREGISTRATION(( context(), static_cast(descriptor), reinterpret_cast(descriptor_data))); // Leave descriptor_data set so that it will be freed by the subsequent // call to cleanup_descriptor_data. } else { // We are shutting down, so prevent cleanup_descriptor_data from freeing // the descriptor_data object and let the destructor free it instead. 
descriptor_data = 0; } } void epoll_reactor::cleanup_descriptor_data( per_descriptor_data& descriptor_data) { if (descriptor_data) { free_descriptor_state(descriptor_data); descriptor_data = 0; } } void epoll_reactor::run(long usec, op_queue& ops) { // This code relies on the fact that the scheduler queues the reactor task // behind all descriptor operations generated by this function. This means, // that by the time we reach this point, any previously returned descriptor // operations have already been dequeued. Therefore it is now safe for us to // reuse and return them for the scheduler to queue again. // Calculate timeout. Check the timer queues only if timerfd is not in use. int timeout; if (usec == 0) timeout = 0; else { timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1); if (timer_fd_ == -1) { mutex::scoped_lock lock(mutex_); timeout = get_timeout(timeout); } } // Block on the epoll descriptor. epoll_event events[128]; int num_events = epoll_wait(epoll_fd_, events, 128, timeout); #if defined(ASIO_ENABLE_HANDLER_TRACKING) // Trace the waiting events. for (int i = 0; i < num_events; ++i) { void* ptr = events[i].data.ptr; if (ptr == &interrupter_) { // Ignore. } # if defined(ASIO_HAS_TIMERFD) else if (ptr == &timer_fd_) { // Ignore. } # endif // defined(ASIO_HAS_TIMERFD) else { unsigned event_mask = 0; if ((events[i].events & EPOLLIN) != 0) event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT; if ((events[i].events & EPOLLOUT)) event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT; if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0) event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT; ASIO_HANDLER_REACTOR_EVENTS((context(), reinterpret_cast(ptr), event_mask)); } } #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #if defined(ASIO_HAS_TIMERFD) bool check_timers = (timer_fd_ == -1); #else // defined(ASIO_HAS_TIMERFD) bool check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) // Dispatch the waiting events. 
for (int i = 0; i < num_events; ++i) { void* ptr = events[i].data.ptr; if (ptr == &interrupter_) { // No need to reset the interrupter since we're leaving the descriptor // in a ready-to-read state and relying on edge-triggered notifications // to make it so that we only get woken up when the descriptor's epoll // registration is updated. #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ == -1) check_timers = true; #else // defined(ASIO_HAS_TIMERFD) check_timers = true; #endif // defined(ASIO_HAS_TIMERFD) } #if defined(ASIO_HAS_TIMERFD) else if (ptr == &timer_fd_) { check_timers = true; } #endif // defined(ASIO_HAS_TIMERFD) else { // The descriptor operation doesn't count as work in and of itself, so we // don't call work_started() here. This still allows the scheduler to // stop if the only remaining operations are descriptor operations. descriptor_state* descriptor_data = static_cast(ptr); if (!ops.is_enqueued(descriptor_data)) { descriptor_data->set_ready_events(events[i].events); ops.push(descriptor_data); } else { descriptor_data->add_ready_events(events[i].events); } } } if (check_timers) { mutex::scoped_lock common_lock(mutex_); timer_queues_.get_ready_timers(ops); #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); } #endif // defined(ASIO_HAS_TIMERFD) } } void epoll_reactor::interrupt() { epoll_event ev = { 0, { 0 } }; ev.events = EPOLLIN | EPOLLERR | EPOLLET; ev.data.ptr = &interrupter_; epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev); } int epoll_reactor::do_epoll_create() { #if defined(EPOLL_CLOEXEC) int fd = epoll_create1(EPOLL_CLOEXEC); #else // defined(EPOLL_CLOEXEC) int fd = -1; errno = EINVAL; #endif // defined(EPOLL_CLOEXEC) if (fd == -1 && (errno == EINVAL || errno == ENOSYS)) { fd = epoll_create(epoll_size); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } if (fd == -1) { 
asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "epoll"); } return fd; } int epoll_reactor::do_timerfd_create() { #if defined(ASIO_HAS_TIMERFD) # if defined(TFD_CLOEXEC) int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); # else // defined(TFD_CLOEXEC) int fd = -1; errno = EINVAL; # endif // defined(TFD_CLOEXEC) if (fd == -1 && errno == EINVAL) { fd = timerfd_create(CLOCK_MONOTONIC, 0); if (fd != -1) ::fcntl(fd, F_SETFD, FD_CLOEXEC); } return fd; #else // defined(ASIO_HAS_TIMERFD) return -1; #endif // defined(ASIO_HAS_TIMERFD) } epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state() { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING( REACTOR_IO, scheduler_.concurrency_hint())); } void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s) { mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_); registered_descriptors_.free(s); } void epoll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } void epoll_reactor::update_timeout() { #if defined(ASIO_HAS_TIMERFD) if (timer_fd_ != -1) { itimerspec new_timeout; itimerspec old_timeout; int flags = get_timeout(new_timeout); timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout); return; } #endif // defined(ASIO_HAS_TIMERFD) interrupt(); } int epoll_reactor::get_timeout(int msec) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. const int max_msec = 5 * 60 * 1000; return timer_queues_.wait_duration_msec( (msec < 0 || max_msec < msec) ? 
max_msec : msec); } #if defined(ASIO_HAS_TIMERFD) int epoll_reactor::get_timeout(itimerspec& ts) { ts.it_interval.tv_sec = 0; ts.it_interval.tv_nsec = 0; long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000); ts.it_value.tv_sec = usec / 1000000; ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1; return usec ? 0 : TFD_TIMER_ABSTIME; } #endif // defined(ASIO_HAS_TIMERFD) struct epoll_reactor::perform_io_cleanup_on_block_exit { explicit perform_io_cleanup_on_block_exit(epoll_reactor* r) : reactor_(r), first_op_(0) { } ~perform_io_cleanup_on_block_exit() { if (first_op_) { // Post the remaining completed operations for invocation. if (!ops_.empty()) reactor_->scheduler_.post_deferred_completions(ops_); // A user-initiated operation has completed, but there's no need to // explicitly call work_finished() here. Instead, we'll take advantage of // the fact that the scheduler will call work_finished() once we return. } else { // No user-initiated operations have completed, so we need to compensate // for the work_finished() call that the scheduler will make once this // operation returns. reactor_->scheduler_.compensating_work_started(); } } epoll_reactor* reactor_; op_queue ops_; operation* first_op_; }; epoll_reactor::descriptor_state::descriptor_state(bool locking) : operation(&epoll_reactor::descriptor_state::do_complete), mutex_(locking) { } operation* epoll_reactor::descriptor_state::perform_io(uint32_t events) { mutex_.lock(); perform_io_cleanup_on_block_exit io_cleanup(reactor_); mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock); // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. 
static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI }; for (int j = max_ops - 1; j >= 0; --j) { if (events & (flag[j] | EPOLLERR | EPOLLHUP)) { try_speculative_[j] = true; while (reactor_op* op = op_queue_[j].front()) { if (reactor_op::status status = op->perform()) { op_queue_[j].pop(); io_cleanup.ops_.push(op); if (status == reactor_op::done_and_exhausted) { try_speculative_[j] = false; break; } } else break; } } } // The first operation will be returned for completion now. The others will // be posted for later by the io_cleanup object's destructor. io_cleanup.first_op_ = io_cleanup.ops_.front(); io_cleanup.ops_.pop(); return io_cleanup.first_op_; } void epoll_reactor::descriptor_state::do_complete( void* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { if (owner) { descriptor_state* descriptor_data = static_cast(base); uint32_t events = static_cast(bytes_transferred); if (operation* op = descriptor_data->perform_io(events)) { op->complete(owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP galera-4-26.4.25/asio/asio/detail/impl/win_iocp_handle_service.ipp000644 000164 177776 00000033013 15107057155 026161 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_iocp_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service::overlapped_wrapper : public OVERLAPPED { public: explicit overlapped_wrapper(asio::error_code& ec) { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; // Create a non-signalled manual-reset event, for GetOverlappedResult. hEvent = ::CreateEventW(0, TRUE, FALSE, 0); if (hEvent) { // As documented in GetQueuedCompletionStatus, setting the low order // bit of this event prevents our synchronous writes from being treated // as completion port events. DWORD_PTR tmp = reinterpret_cast(hEvent); hEvent = reinterpret_cast(tmp | 1); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } } ~overlapped_wrapper() { if (hEvent) { ::CloseHandle(hEvent); } } }; win_iocp_handle_service::win_iocp_handle_service(execution_context& context) : execution_context_service_base(context), iocp_service_(asio::use_service(context)), mutex_(), impl_list_(0) { } void win_iocp_handle_service::shutdown() { // Close all implementations, causing all operations to complete. asio::detail::mutex::scoped_lock lock(mutex_); implementation_type* impl = impl_list_; while (impl) { close_for_destruction(*impl); impl = impl->next_; } } void win_iocp_handle_service::construct( win_iocp_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_construct( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service::implementation_type& other_impl) { impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } void win_iocp_handle_service::move_assign( win_iocp_handle_service::implementation_type& impl, win_iocp_handle_service& other_service, win_iocp_handle_service::implementation_type& other_impl) { close_for_destruction(impl); if (this != &other_service) { // Remove implementation from linked list of all implementations. asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_; other_impl.safe_cancellation_thread_id_ = 0; if (this != &other_service) { // Insert implementation into linked list of all implementations. asio::detail::mutex::scoped_lock lock(other_service.mutex_); impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } } void win_iocp_handle_service::destroy( win_iocp_handle_service::implementation_type& impl) { close_for_destruction(impl); // Remove implementation from linked list of all implementations. 
asio::detail::mutex::scoped_lock lock(mutex_); if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } asio::error_code win_iocp_handle_service::assign( win_iocp_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if (iocp_service_.register_handle(handle, ec)) return ec; impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_iocp_handle_service::close( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", &impl, reinterpret_cast(impl.handle_), "close")); if (!::CloseHandle(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } else { ec = asio::error_code(); } return ec; } asio::error_code win_iocp_handle_service::cancel( win_iocp_handle_service::implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", &impl, reinterpret_cast(impl.handle_), "cancel")); if (FARPROC cancel_io_ex_ptr = ::GetProcAddress( ::GetModuleHandleA("KERNEL32"), "CancelIoEx")) { // The version of Windows supports cancellation from any thread. typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED); cancel_io_ex_t cancel_io_ex = reinterpret_cast( reinterpret_cast(cancel_io_ex_ptr)); if (!cancel_io_ex(impl.handle_, 0)) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_NOT_FOUND) { // ERROR_NOT_FOUND means that there were no operations to be // cancelled. We swallow this error to match the behaviour on other // platforms. 
ec = asio::error_code(); } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } } else { ec = asio::error_code(); } } else if (impl.safe_cancellation_thread_id_ == 0) { // No operations have been started, so there's nothing to cancel. ec = asio::error_code(); } else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId()) { // Asynchronous operations have been started from the current thread only, // so it is safe to try to cancel them using CancelIo. if (!::CancelIo(impl.handle_)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } } else { // Asynchronous operations have been started from more than one thread, // so cancellation is not safe. ec = asio::error::operation_not_supported; } return ec; } size_t win_iocp_handle_service::do_write( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to write 0 bytes on a handle is a no-op. if (buffer.size() == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Write the data. overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, buffer.data(), static_cast(buffer.size()), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } // Wait for the operation to complete. 
DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_write_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (buffer.size() == 0) { // A request to write 0 bytes on a handle is a no-op. iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::WriteFile(impl.handle_, buffer.data(), static_cast(buffer.size()), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } size_t win_iocp_handle_service::do_read( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return 0; } // A request to read 0 bytes on a stream handle is a no-op. if (buffer.size() == 0) { ec = asio::error_code(); return 0; } overlapped_wrapper overlapped(ec); if (ec) { return 0; } // Read some data. 
overlapped.Offset = offset & 0xFFFFFFFF; overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, buffer.data(), static_cast(buffer.size()), 0, &overlapped); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return 0; } } // Wait for the operation to complete. DWORD bytes_transferred = 0; ok = ::GetOverlappedResult(impl.handle_, &overlapped, &bytes_transferred, TRUE); if (!ok) { DWORD last_error = ::GetLastError(); if (last_error == ERROR_HANDLE_EOF) { ec = asio::error::eof; } else { ec = asio::error_code(last_error, asio::error::get_system_category()); } return (last_error == ERROR_MORE_DATA) ? bytes_transferred : 0; } ec = asio::error_code(); return bytes_transferred; } void win_iocp_handle_service::start_read_op( win_iocp_handle_service::implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op) { update_cancellation_thread_id(impl); iocp_service_.work_started(); if (!is_open(impl)) { iocp_service_.on_completion(op, asio::error::bad_descriptor); } else if (buffer.size() == 0) { // A request to read 0 bytes on a handle is a no-op. 
iocp_service_.on_completion(op); } else { DWORD bytes_transferred = 0; op->Offset = offset & 0xFFFFFFFF; op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF; BOOL ok = ::ReadFile(impl.handle_, buffer.data(), static_cast(buffer.size()), &bytes_transferred, op); DWORD last_error = ::GetLastError(); if (!ok && last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA) { iocp_service_.on_completion(op, last_error, bytes_transferred); } else { iocp_service_.on_pending(op); } } } void win_iocp_handle_service::update_cancellation_thread_id( win_iocp_handle_service::implementation_type& impl) { if (impl.safe_cancellation_thread_id_ == 0) impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId(); else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId()) impl.safe_cancellation_thread_id_ = ~DWORD(0); } void win_iocp_handle_service::close_for_destruction(implementation_type& impl) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle", &impl, reinterpret_cast(impl.handle_), "close")); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; impl.safe_cancellation_thread_id_ = 0; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/service_registry.ipp000644 000164 177776 00000012325 15107057155 024712 0ustar00jenkinsnogroup000000 000000 // // detail/impl/service_registry.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/service_registry.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { service_registry::service_registry(execution_context& owner) : owner_(owner), first_service_(0) { } service_registry::~service_registry() { } void service_registry::shutdown_services() { execution_context::service* service = first_service_; while (service) { service->shutdown(); service = service->next_; } } void service_registry::destroy_services() { while (first_service_) { execution_context::service* next_service = first_service_->next_; destroy(first_service_); first_service_ = next_service; } } void service_registry::notify_fork(execution_context::fork_event fork_ev) { // Make a copy of all of the services while holding the lock. We don't want // to hold the lock while calling into each service, as it may try to call // back into this class. std::vector services; { asio::detail::mutex::scoped_lock lock(mutex_); execution_context::service* service = first_service_; while (service) { services.push_back(service); service = service->next_; } } // If processing the fork_prepare event, we want to go in reverse order of // service registration, which happens to be the existing order of the // services in the vector. For the other events we want to go in the other // direction. 
std::size_t num_services = services.size(); if (fork_ev == execution_context::fork_prepare) for (std::size_t i = 0; i < num_services; ++i) services[i]->notify_fork(fork_ev); else for (std::size_t i = num_services; i > 0; --i) services[i - 1]->notify_fork(fork_ev); } void service_registry::init_key_from_id(execution_context::service::key& key, const execution_context::id& id) { key.type_info_ = 0; key.id_ = &id; } bool service_registry::keys_match( const execution_context::service::key& key1, const execution_context::service::key& key2) { if (key1.id_ && key2.id_) if (key1.id_ == key2.id_) return true; if (key1.type_info_ && key2.type_info_) if (*key1.type_info_ == *key2.type_info_) return true; return false; } void service_registry::destroy(execution_context::service* service) { delete service; } execution_context::service* service_registry::do_use_service( const execution_context::service::key& key, factory_type factory, void* owner) { asio::detail::mutex::scoped_lock lock(mutex_); // First see if there is an existing service object with the given key. execution_context::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Create a new service object. The service registry's mutex is not locked // at this time to allow for nested calls into this function from the new // service's constructor. lock.unlock(); auto_service_ptr new_service = { factory(owner) }; new_service.ptr_->key_ = key; lock.lock(); // Check that nobody else created another service object of the same type // while the lock was released. service = first_service_; while (service) { if (keys_match(service->key_, key)) return service; service = service->next_; } // Service was successfully initialised, pass ownership to registry. 
new_service.ptr_->next_ = first_service_; first_service_ = new_service.ptr_; new_service.ptr_ = 0; return first_service_; } void service_registry::do_add_service( const execution_context::service::key& key, execution_context::service* new_service) { if (&owner_ != &new_service->context()) asio::detail::throw_exception(invalid_service_owner()); asio::detail::mutex::scoped_lock lock(mutex_); // Check if there is an existing service object with the given key. execution_context::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) asio::detail::throw_exception(service_already_exists()); service = service->next_; } // Take ownership of the service object. new_service->key_ = key; new_service->next_ = first_service_; first_service_ = new_service; } bool service_registry::do_has_service( const execution_context::service::key& key) const { asio::detail::mutex::scoped_lock lock(mutex_); execution_context::service* service = first_service_; while (service) { if (keys_match(service->key_, key)) return true; service = service->next_; } return false; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP galera-4-26.4.25/asio/asio/detail/impl/reactive_socket_service_base.ipp000644 000164 177776 00000020352 15107057155 027205 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_service_base.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/reactive_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_socket_service_base::reactive_socket_service_base( execution_context& context) : reactor_(use_service(context)) { reactor_.init_task(); } void reactive_socket_service_base::base_shutdown() { } void reactive_socket_service_base::construct( reactive_socket_service_base::base_implementation_type& impl) { impl.socket_ = invalid_socket; impl.state_ = 0; } void reactive_socket_service_base::base_move_construct( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base::base_implementation_type& other_impl) { impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::base_move_assign( reactive_socket_service_base::base_implementation_type& impl, reactive_socket_service_base& other_service, reactive_socket_service_base::base_implementation_type& other_impl) { destroy(impl); impl.socket_ = other_impl.socket_; other_impl.socket_ = invalid_socket; impl.state_ = other_impl.state_; other_impl.state_ = 0; other_service.reactor_.move_descriptor(impl.socket_, impl.reactor_data_, other_impl.reactor_data_); } void reactive_socket_service_base::destroy( reactive_socket_service_base::base_implementation_type& impl) { if (impl.socket_ != invalid_socket) { ASIO_HANDLER_OPERATION((reactor_.context(), "socket", &impl, 
impl.socket_, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); asio::error_code ignored_ec; socket_ops::close(impl.socket_, impl.state_, true, ignored_ec); reactor_.cleanup_descriptor_data(impl.reactor_data_); } } asio::error_code reactive_socket_service_base::close( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((reactor_.context(), "socket", &impl, impl.socket_, "close")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, (impl.state_ & socket_ops::possible_dup) == 0); socket_ops::close(impl.socket_, impl.state_, false, ec); reactor_.cleanup_descriptor_data(impl.reactor_data_); } else { ec = asio::error_code(); } // The descriptor is closed by the OS even if close() returns an error. // // (Actually, POSIX says the state of the descriptor is unspecified. On // Linux the descriptor is apparently closed anyway; e.g. see // http://lkml.org/lkml/2005/9/10/129 // We'll just have to assume that other OSes follow the same behaviour. The // known exception is when Windows's closesocket() function fails with // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close(). 
construct(impl); return ec; } socket_type reactive_socket_service_base::release( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return invalid_socket; } ASIO_HANDLER_OPERATION((reactor_.context(), "socket", &impl, impl.socket_, "release")); reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false); reactor_.cleanup_descriptor_data(impl.reactor_data_); socket_type sock = impl.socket_; construct(impl); ec = asio::error_code(); return sock; } asio::error_code reactive_socket_service_base::cancel( reactive_socket_service_base::base_implementation_type& impl, asio::error_code& ec) { if (!is_open(impl)) { ec = asio::error::bad_descriptor; return ec; } ASIO_HANDLER_OPERATION((reactor_.context(), "socket", &impl, impl.socket_, "cancel")); reactor_.cancel_ops(impl.socket_, impl.reactor_data_); ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_open( reactive_socket_service_base::base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } socket_holder sock(socket_ops::socket(af, type, protocol, ec)); if (sock.get() == invalid_socket) return ec; if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = sock.release(); switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } ec = asio::error_code(); return ec; } asio::error_code reactive_socket_service_base::do_assign( reactive_socket_service_base::base_implementation_type& impl, int type, const reactive_socket_service_base::native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } if 
(int err = reactor_.register_descriptor( native_socket, impl.reactor_data_)) { ec = asio::error_code(err, asio::error::get_system_category()); return ec; } impl.socket_ = native_socket; switch (type) { case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break; case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break; default: impl.state_ = 0; break; } impl.state_ |= socket_ops::possible_dup; ec = asio::error_code(); return ec; } void reactive_socket_service_base::start_op( reactive_socket_service_base::base_implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop) { if (!noop) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { reactor_.start_op(op_type, impl.socket_, impl.reactor_data_, op, is_continuation, is_non_blocking); return; } } reactor_.post_immediate_completion(op, is_continuation); } void reactive_socket_service_base::start_accept_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open) { if (!peer_is_open) start_op(impl, reactor::read_op, op, is_continuation, true, false); else { op->ec_ = asio::error::already_open; reactor_.post_immediate_completion(op, is_continuation); } } void reactive_socket_service_base::start_connect_op( reactive_socket_service_base::base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen) { if ((impl.state_ & socket_ops::non_blocking) || socket_ops::set_internal_non_blocking( impl.socket_, impl.state_, true, op->ec_)) { if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0) { if (op->ec_ == asio::error::in_progress || op->ec_ == asio::error::would_block) { op->ec_ = asio::error_code(); reactor_.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_, op, is_continuation, false); return; } } } reactor_.post_immediate_completion(op, 
is_continuation); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP galera-4-26.4.25/asio/asio/detail/impl/timer_queue_set.ipp000644 000164 177776 00000004274 15107057155 024525 0ustar00jenkinsnogroup000000 000000 // // detail/impl/timer_queue_set.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { timer_queue_set::timer_queue_set() : first_(0) { } void timer_queue_set::insert(timer_queue_base* q) { q->next_ = first_; first_ = q; } void timer_queue_set::erase(timer_queue_base* q) { if (first_) { if (q == first_) { first_ = q->next_; q->next_ = 0; return; } for (timer_queue_base* p = first_; p->next_; p = p->next_) { if (p->next_ == q) { p->next_ = q->next_; q->next_ = 0; return; } } } } bool timer_queue_set::all_empty() const { for (timer_queue_base* p = first_; p; p = p->next_) if (!p->empty()) return false; return true; } long timer_queue_set::wait_duration_msec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = p->wait_duration_msec(min_duration); return min_duration; } long timer_queue_set::wait_duration_usec(long max_duration) const { long min_duration = max_duration; for (timer_queue_base* p = first_; p; p = p->next_) min_duration = 
p->wait_duration_usec(min_duration); return min_duration; } void timer_queue_set::get_ready_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_ready_timers(ops); } void timer_queue_set::get_all_timers(op_queue& ops) { for (timer_queue_base* p = first_; p; p = p->next_) p->get_all_timers(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP galera-4-26.4.25/asio/asio/detail/impl/pipe_select_interrupter.ipp000644 000164 177776 00000005653 15107057155 026267 0ustar00jenkinsnogroup000000 000000 // // detail/impl/pipe_select_interrupter.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(ASIO_WINDOWS) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include #include #include #include #include "asio/detail/pipe_select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { pipe_select_interrupter::pipe_select_interrupter() { open_descriptors(); } void pipe_select_interrupter::open_descriptors() { int pipe_fds[2]; if (pipe(pipe_fds) == 0) { read_descriptor_ = pipe_fds[0]; ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK); write_descriptor_ = pipe_fds[1]; ::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(read_descriptor_, F_SETFD, 
FD_CLOEXEC); ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "pipe_select_interrupter"); } } pipe_select_interrupter::~pipe_select_interrupter() { close_descriptors(); } void pipe_select_interrupter::close_descriptors() { if (read_descriptor_ != -1) ::close(read_descriptor_); if (write_descriptor_ != -1) ::close(write_descriptor_); } void pipe_select_interrupter::recreate() { close_descriptors(); write_descriptor_ = -1; read_descriptor_ = -1; open_descriptors(); } void pipe_select_interrupter::interrupt() { char byte = 0; signed_size_type result = ::write(write_descriptor_, &byte, 1); (void)result; } bool pipe_select_interrupter::reset() { for (;;) { char data[1024]; signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data)); if (bytes_read < 0 && errno == EINTR) continue; bool was_interrupted = (bytes_read > 0); while (bytes_read == sizeof(data)) bytes_read = ::read(read_descriptor_, data, sizeof(data)); return was_interrupted; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP galera-4-26.4.25/asio/asio/detail/impl/strand_executor_service.ipp000644 000164 177776 00000006717 15107057155 026263 0ustar00jenkinsnogroup000000 000000 // // detail/impl/strand_executor_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP #define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/strand_executor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { strand_executor_service::strand_executor_service(execution_context& ctx) : execution_context_service_base(ctx), mutex_(), salt_(0), impl_list_(0) { } void strand_executor_service::shutdown() { op_queue ops; asio::detail::mutex::scoped_lock lock(mutex_); strand_impl* impl = impl_list_; while (impl) { impl->mutex_->lock(); impl->shutdown_ = true; ops.push(impl->waiting_queue_); ops.push(impl->ready_queue_); impl->mutex_->unlock(); impl = impl->next_; } } strand_executor_service::implementation_type strand_executor_service::create_implementation() { implementation_type new_impl(new strand_impl); new_impl->locked_ = false; new_impl->shutdown_ = false; asio::detail::mutex::scoped_lock lock(mutex_); // Select a mutex from the pool of shared mutexes. std::size_t salt = salt_++; std::size_t mutex_index = reinterpret_cast(new_impl.get()); mutex_index += (reinterpret_cast(new_impl.get()) >> 3); mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2); mutex_index = mutex_index % num_mutexes; if (!mutexes_[mutex_index].get()) mutexes_[mutex_index].reset(new mutex); new_impl->mutex_ = mutexes_[mutex_index].get(); // Insert implementation into linked list of all implementations. 
new_impl->next_ = impl_list_; new_impl->prev_ = 0; if (impl_list_) impl_list_->prev_ = new_impl.get(); impl_list_ = new_impl.get(); new_impl->service_ = this; return new_impl; } strand_executor_service::strand_impl::~strand_impl() { asio::detail::mutex::scoped_lock lock(service_->mutex_); // Remove implementation from linked list of all implementations. if (service_->impl_list_ == this) service_->impl_list_ = next_; if (prev_) prev_->next_ = next_; if (next_) next_->prev_= prev_; } bool strand_executor_service::enqueue(const implementation_type& impl, scheduler_operation* op) { impl->mutex_->lock(); if (impl->shutdown_) { impl->mutex_->unlock(); op->destroy(); return false; } else if (impl->locked_) { // Some other function already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_->unlock(); return false; } else { // The function is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_->unlock(); impl->ready_queue_.push(op); return true; } } bool strand_executor_service::running_in_this_thread( const implementation_type& impl) { return !!call_stack::contains(impl.get()); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/signal_set_service.ipp000644 000164 177776 00000045773 15107057155 025207 0ustar00jenkinsnogroup000000 000000 // // detail/impl/signal_set_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/signal_set_service.hpp" #include "asio/detail/static_mutex.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct signal_state { // Mutex used for protecting global state. static_mutex mutex_; // The read end of the pipe used for signal notifications. int read_descriptor_; // The write end of the pipe used for signal notifications. int write_descriptor_; // Whether the signal state has been prepared for a fork. bool fork_prepared_; // The head of a linked list of all signal_set_service instances. class signal_set_service* service_list_; // A count of the number of objects that are registered for each signal. 
std::size_t registration_count_[max_signal_number]; }; signal_state* get_signal_state() { static signal_state state = { ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } }; return &state; } void asio_signal_handler(int signal_number) { #if defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) signal_set_service::deliver_signal(signal_number); #else // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) int saved_errno = errno; signal_state* state = get_signal_state(); signed_size_type result = ::write(state->write_descriptor_, &signal_number, sizeof(signal_number)); (void)result; errno = saved_errno; #endif // defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) #if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) ::signal(signal_number, asio_signal_handler); #endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION) } #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) class signal_set_service::pipe_read_op : public reactor_op { public: pipe_read_op() : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete) { } static status do_perform(reactor_op*) { signal_state* state = get_signal_state(); int fd = state->read_descriptor_; int signal_number = 0; while (::read(fd, &signal_number, sizeof(int)) == sizeof(int)) if (signal_number >= 0 && signal_number < max_signal_number) signal_set_service::deliver_signal(signal_number); return not_done; } static void do_complete(void* /*owner*/, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { pipe_read_op* o(static_cast(base)); delete o; } }; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) signal_set_service::signal_set_service(execution_context& context) : execution_context_service_base(context), scheduler_(asio::use_service(context)), #if !defined(ASIO_WINDOWS) \ && 
!defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) reactor_(asio::use_service(context)), #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) next_(0), prev_(0) { get_signal_state()->mutex_.init(); #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) reactor_.init_task(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) for (int i = 0; i < max_signal_number; ++i) registrations_[i] = 0; add_service(this); } signal_set_service::~signal_set_service() { remove_service(this); } void signal_set_service::shutdown() { remove_service(this); op_queue ops; for (int i = 0; i < max_signal_number; ++i) { registration* reg = registrations_[i]; while (reg) { ops.push(*reg->queue_); reg = reg->next_in_table_; } } scheduler_.abandon_operations(ops); } void signal_set_service::notify_fork(execution_context::fork_event fork_ev) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); switch (fork_ev) { case execution_context::fork_prepare: { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = true; lock.unlock(); reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_); reactor_.cleanup_descriptor_data(reactor_data_); } break; case execution_context::fork_parent: if (state->fork_prepared_) { int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, reactor_data_, new pipe_read_op); } break; case execution_context::fork_child: if (state->fork_prepared_) { asio::detail::signal_blocker blocker; close_descriptors(); open_descriptors(); int read_descriptor = state->read_descriptor_; state->fork_prepared_ = false; lock.unlock(); reactor_.register_internal_descriptor(reactor::read_op, 
read_descriptor, reactor_data_, new pipe_read_op); } break; default: break; } #else // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) (void)fork_ev; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::construct( signal_set_service::implementation_type& impl) { impl.signals_ = 0; } void signal_set_service::destroy( signal_set_service::implementation_type& impl) { asio::error_code ignored_ec; clear(impl, ignored_ec); cancel(impl, ignored_ec); } asio::error_code signal_set_service::add( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the appropriate place to insert the registration. registration** insertion_point = &impl.signals_; registration* next = impl.signals_; while (next && next->signal_number_ < signal_number) { insertion_point = &next->next_in_set_; next = next->next_in_set_; } // Only do something if the signal is not already registered. if (next == 0 || next->signal_number_ != signal_number) { registration* new_registration = new registration; #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Register for the signal if we're the first. if (state->registration_count_[signal_number] == 0) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. 
struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = asio_signal_handler; sigfillset(&sa.sa_mask); if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, asio_signal_handler) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) delete new_registration; return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Record the new registration in the set. new_registration->signal_number_ = signal_number; new_registration->queue_ = &impl.queue_; new_registration->next_in_set_ = next; *insertion_point = new_registration; // Insert registration into the registration table. new_registration->next_in_table_ = registrations_[signal_number]; if (registrations_[signal_number]) registrations_[signal_number]->prev_in_table_ = new_registration; registrations_[signal_number] = new_registration; ++state->registration_count_[signal_number]; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::remove( signal_set_service::implementation_type& impl, int signal_number, asio::error_code& ec) { // Check that the signal number is valid. if (signal_number < 0 || signal_number >= max_signal_number) { ec = asio::error::invalid_argument; return ec; } signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); // Find the signal number in the list of registrations. 
registration** deletion_point = &impl.signals_; registration* reg = impl.signals_; while (reg && reg->signal_number_ < signal_number) { deletion_point = ®->next_in_set_; reg = reg->next_in_set_; } if (reg != 0 && reg->signal_number_ == signal_number) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. if (state->registration_count_[signal_number] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(signal_number, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(signal_number, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the set. *deletion_point = reg->next_in_set_; // Remove the registration from the registration table. if (registrations_[signal_number] == reg) registrations_[signal_number] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[signal_number]; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::clear( signal_set_service::implementation_type& impl, asio::error_code& ec) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (registration* reg = impl.signals_) { #if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Set signal handler back to the default if we're the last. 
if (state->registration_count_[reg->signal_number_] == 1) { # if defined(ASIO_HAS_SIGACTION) using namespace std; // For memset. struct sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_DFL; if (::sigaction(reg->signal_number_, &sa, 0) == -1) # else // defined(ASIO_HAS_SIGACTION) if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR) # endif // defined(ASIO_HAS_SIGACTION) { # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error::invalid_argument; # else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ec = asio::error_code(errno, asio::error::get_system_category()); # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ec; } } #endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION) // Remove the registration from the registration table. if (registrations_[reg->signal_number_] == reg) registrations_[reg->signal_number_] = reg->next_in_table_; if (reg->prev_in_table_) reg->prev_in_table_->next_in_table_ = reg->next_in_table_; if (reg->next_in_table_) reg->next_in_table_->prev_in_table_ = reg->prev_in_table_; --state->registration_count_[reg->signal_number_]; impl.signals_ = reg->next_in_set_; delete reg; } ec = asio::error_code(); return ec; } asio::error_code signal_set_service::cancel( signal_set_service::implementation_type& impl, asio::error_code& ec) { ASIO_HANDLER_OPERATION((scheduler_.context(), "signal_set", &impl, 0, "cancel")); op_queue ops; { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); while (signal_op* op = impl.queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.queue_.pop(); ops.push(op); } } scheduler_.post_deferred_completions(ops); ec = asio::error_code(); return ec; } void signal_set_service::deliver_signal(int signal_number) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); signal_set_service* service = state->service_list_; while (service) { op_queue ops; registration* reg = 
service->registrations_[signal_number]; while (reg) { if (reg->queue_->empty()) { ++reg->undelivered_; } else { while (signal_op* op = reg->queue_->front()) { op->signal_number_ = signal_number; reg->queue_->pop(); ops.push(op); } } reg = reg->next_in_table_; } service->scheduler_.post_deferred_completions(ops); service = service->next_; } } void signal_set_service::add_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the first service to be created, open a new pipe. if (state->service_list_ == 0) open_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If a scheduler_ object is thread-unsafe then it must be the only // scheduler used to create signal_set objects. if (state->service_list_ != 0) { if (!ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, service->scheduler_.concurrency_hint()) || !ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER, state->service_list_->scheduler_.concurrency_hint())) { std::logic_error ex( "Thread-unsafe execution context objects require " "exclusive access to signal handling."); asio::detail::throw_exception(ex); } } // Insert service into linked list of all services. service->next_ = state->service_list_; service->prev_ = 0; if (state->service_list_) state->service_list_->prev_ = service; state->service_list_ = service; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Register for pipe readiness notifications. 
int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.register_internal_descriptor(reactor::read_op, read_descriptor, service->reactor_data_, new pipe_read_op); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::remove_service(signal_set_service* service) { signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); if (service->next_ || service->prev_ || state->service_list_ == service) { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // Disable the pipe readiness notifications. int read_descriptor = state->read_descriptor_; lock.unlock(); service->reactor_.deregister_internal_descriptor( read_descriptor, service->reactor_data_); service->reactor_.cleanup_descriptor_data(service->reactor_data_); lock.lock(); #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // Remove service from linked list of all services. if (state->service_list_ == service) state->service_list_ = service->next_; if (service->prev_) service->prev_->next_ = service->next_; if (service->next_) service->next_->prev_= service->prev_; service->next_ = 0; service->prev_ = 0; #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) // If this is the last service to be removed, close the pipe. 
if (state->service_list_ == 0) close_descriptors(); #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) } } void signal_set_service::open_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); int pipe_fds[2]; if (::pipe(pipe_fds) == 0) { state->read_descriptor_ = pipe_fds[0]; ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK); state->write_descriptor_ = pipe_fds[1]; ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK); #if defined(FD_CLOEXEC) ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC); ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC); #endif // defined(FD_CLOEXEC) } else { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "signal_set_service pipe"); } #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::close_descriptors() { #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) signal_state* state = get_signal_state(); if (state->read_descriptor_ != -1) ::close(state->read_descriptor_); state->read_descriptor_ = -1; if (state->write_descriptor_ != -1) ::close(state->write_descriptor_); state->write_descriptor_ = -1; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) } void signal_set_service::start_wait_op( signal_set_service::implementation_type& impl, signal_op* op) { scheduler_.work_started(); signal_state* state = get_signal_state(); static_mutex::scoped_lock lock(state->mutex_); registration* reg = impl.signals_; while (reg) { if (reg->undelivered_ > 0) { --reg->undelivered_; op->signal_number_ = reg->signal_number_; scheduler_.post_deferred_completion(op); return; } reg = reg->next_in_set_; } impl.queue_.push(op); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP 
galera-4-26.4.25/asio/asio/detail/impl/posix_mutex.ipp000644 000164 177776 00000002123 15107057155 023701 0ustar00jenkinsnogroup000000 000000 // // detail/impl/posix_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_mutex.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_mutex::posix_mutex() { int error = ::pthread_mutex_init(&mutex_, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP galera-4-26.4.25/asio/asio/detail/impl/win_event.ipp000644 000164 177776 00000003531 15107057155 023317 0ustar00jenkinsnogroup000000 000000 // // detail/win_event.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP #define ASIO_DETAIL_IMPL_WIN_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_event.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_event::win_event() : state_(0) { #if defined(ASIO_WINDOWS_APP) events_[0] = ::CreateEventExW(0, 0, CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS); #else // defined(ASIO_WINDOWS_APP) events_[0] = ::CreateEventW(0, true, false, 0); #endif // defined(ASIO_WINDOWS_APP) if (!events_[0]) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } #if defined(ASIO_WINDOWS_APP) events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS); #else // defined(ASIO_WINDOWS_APP) events_[1] = ::CreateEventW(0, false, false, 0); #endif // defined(ASIO_WINDOWS_APP) if (!events_[1]) { DWORD last_error = ::GetLastError(); ::CloseHandle(events_[0]); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "event"); } } win_event::~win_event() { ::CloseHandle(events_[0]); ::CloseHandle(events_[1]); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP galera-4-26.4.25/asio/asio/detail/impl/epoll_reactor.hpp000644 000164 177776 00000004466 15107057155 024162 0ustar00jenkinsnogroup000000 000000 // // detail/impl/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_EPOLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void epoll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void epoll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void epoll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { mutex::scoped_lock lock(mutex_); if (shutdown_) { scheduler_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); scheduler_.work_started(); if (earliest) update_timeout(); } template std::size_t epoll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); scheduler_.post_deferred_completions(ops); return n; } template void epoll_reactor::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source) { mutex::scoped_lock lock(mutex_); op_queue ops; queue.cancel_timer(target, ops); queue.move_timer(target, source); lock.unlock(); scheduler_.post_deferred_completions(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/impl/dev_poll_reactor.hpp000644 000164 177776 00000004657 15107057155 024655 0ustar00jenkinsnogroup000000 000000 // // detail/impl/dev_poll_reactor.hpp // 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void dev_poll_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } template void dev_poll_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void dev_poll_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { scheduler_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); scheduler_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t dev_poll_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); scheduler_.post_deferred_completions(ops); return n; } template void dev_poll_reactor::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; queue.cancel_timer(target, ops); queue.move_timer(target, source); lock.unlock(); scheduler_.post_deferred_completions(ops); } } // namespace detail } // namespace asio #include 
"asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/impl/win_iocp_serial_port_service.ipp000644 000164 177776 00000013134 15107057155 027253 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_iocp_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/detail/win_iocp_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_iocp_serial_port_service::win_iocp_serial_port_service( execution_context& context) : execution_context_service_base(context), handle_service_(context) { } void win_iocp_serial_port_service::shutdown() { } asio::error_code win_iocp_serial_port_service::open( win_iocp_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } // For convenience, add a leading \\.\ sequence if not already present. std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device; // Open a handle to the serial port. 
::HANDLE handle = ::CreateFileA(name.c_str(), GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0); if (handle == INVALID_HANDLE_VALUE) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Determine the initial serial port parameters. using namespace std; // For memset. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set some default serial port parameters. This implementation does not // support changing all of these, so they might as well be in a known state. dcb.fBinary = TRUE; // Win32 only supports binary mode. dcb.fNull = FALSE; // Do not ignore NULL characters. dcb.fAbortOnError = FALSE; // Ignore serial framing errors. dcb.BaudRate = 0; // 0 baud by default dcb.ByteSize = 8; // 8 bit bytes dcb.fOutxCtsFlow = FALSE; // No flow control dcb.fOutxDsrFlow = FALSE; dcb.fDtrControl = DTR_CONTROL_DISABLE; dcb.fDsrSensitivity = FALSE; dcb.fOutX = FALSE; dcb.fInX = FALSE; dcb.fRtsControl = DTR_CONTROL_DISABLE; dcb.fParity = FALSE; // No parity dcb.Parity = NOPARITY; dcb.StopBits = ONESTOPBIT; // One stop bit if (!::SetCommState(handle, &dcb)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // Set up timeouts so that the serial port will behave similarly to a // network socket. Reads wait for at least one byte, then return with // whatever they have. Writes return once everything is out the door. 
::COMMTIMEOUTS timeouts; timeouts.ReadIntervalTimeout = 1; timeouts.ReadTotalTimeoutMultiplier = 0; timeouts.ReadTotalTimeoutConstant = 0; timeouts.WriteTotalTimeoutMultiplier = 0; timeouts.WriteTotalTimeoutConstant = 0; if (!::SetCommTimeouts(handle, &timeouts)) { DWORD last_error = ::GetLastError(); ::CloseHandle(handle); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } // We're done. Take ownership of the serial port handle. if (handle_service_.assign(impl, handle, ec)) ::CloseHandle(handle); return ec; } asio::error_code win_iocp_serial_port_service::do_set_option( win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::store_function_type store, const void* option, asio::error_code& ec) { using namespace std; // For memcpy. ::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } if (store(option, dcb, ec)) return ec; if (!::SetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } ec = asio::error_code(); return ec; } asio::error_code win_iocp_serial_port_service::do_get_option( const win_iocp_serial_port_service::implementation_type& impl, win_iocp_serial_port_service::load_function_type load, void* option, asio::error_code& ec) const { using namespace std; // For memset. 
::DCB dcb; memset(&dcb, 0, sizeof(DCB)); dcb.DCBlength = sizeof(DCB); if (!::GetCommState(handle_service_.native_handle(impl), &dcb)) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return ec; } return load(option, dcb, ec); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/strand_service.ipp000644 000164 177776 00000011347 15107057155 024340 0ustar00jenkinsnogroup000000 000000 // // detail/impl/strand_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/call_stack.hpp" #include "asio/detail/strand_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct strand_service::on_do_complete_exit { io_context_impl* owner_; strand_impl* impl_; ~on_do_complete_exit() { impl_->mutex_.lock(); impl_->ready_queue_.push(impl_->waiting_queue_); bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty(); impl_->mutex_.unlock(); if (more_handlers) owner_->post_immediate_completion(impl_, true); } }; strand_service::strand_service(asio::io_context& io_context) : asio::detail::service_base(io_context), io_context_(asio::use_service(io_context)), mutex_(), salt_(0) { } void strand_service::shutdown() { op_queue ops; asio::detail::mutex::scoped_lock lock(mutex_); for (std::size_t i = 0; i < 
num_implementations; ++i) { if (strand_impl* impl = implementations_[i].get()) { ops.push(impl->waiting_queue_); ops.push(impl->ready_queue_); } } } void strand_service::construct(strand_service::implementation_type& impl) { asio::detail::mutex::scoped_lock lock(mutex_); std::size_t salt = salt_++; #if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = salt; #else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) std::size_t index = reinterpret_cast(&impl); index += (reinterpret_cast(&impl) >> 3); index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2); #endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION) index = index % num_implementations; if (!implementations_[index].get()) implementations_[index].reset(new strand_impl); impl = implementations_[index].get(); } bool strand_service::running_in_this_thread( const implementation_type& impl) const { return call_stack::contains(impl) != 0; } bool strand_service::do_dispatch(implementation_type& impl, operation* op) { // If we are running inside the io_context, and no other handler already // holds the strand lock, then the handler can run immediately. bool can_dispatch = io_context_.can_dispatch(); impl->mutex_.lock(); if (can_dispatch && !impl->locked_) { // Immediate invocation is allowed. impl->locked_ = true; impl->mutex_.unlock(); return true; } if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_context_.post_immediate_completion(impl, false); } return false; } void strand_service::do_post(implementation_type& impl, operation* op, bool is_continuation) { impl->mutex_.lock(); if (impl->locked_) { // Some other handler already holds the strand lock. Enqueue for later. 
impl->waiting_queue_.push(op); impl->mutex_.unlock(); } else { // The handler is acquiring the strand lock and so is responsible for // scheduling the strand. impl->locked_ = true; impl->mutex_.unlock(); impl->ready_queue_.push(op); io_context_.post_immediate_completion(impl, is_continuation); } } void strand_service::do_complete(void* owner, operation* base, const asio::error_code& ec, std::size_t /*bytes_transferred*/) { if (owner) { strand_impl* impl = static_cast(base); // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl); // Ensure the next handler, if any, is scheduled on block exit. on_do_complete_exit on_exit; on_exit.owner_ = static_cast(owner); on_exit.impl_ = impl; // Run all ready handlers. No lock is required since the ready queue is // accessed only within the strand. while (operation* o = impl->ready_queue_.front()) { impl->ready_queue_.pop(); o->complete(owner, ec, 0); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/dev_poll_reactor.ipp000644 000164 177776 00000031066 15107057155 024650 0ustar00jenkinsnogroup000000 000000 // // detail/impl/dev_poll_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include "asio/detail/dev_poll_reactor.hpp" #include "asio/detail/assert.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { dev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx) : asio::detail::execution_context_service_base(ctx), scheduler_(use_service(ctx)), mutex_(), dev_poll_fd_(do_dev_poll_create()), interrupter_(), shutdown_(false) { // Add the interrupter's descriptor to /dev/poll. ::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } dev_poll_reactor::~dev_poll_reactor() { shutdown(); ::close(dev_poll_fd_); } void dev_poll_reactor::shutdown() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; lock.unlock(); op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); scheduler_.abandon_operations(ops); } void dev_poll_reactor::notify_fork( asio::execution_context::fork_event fork_ev) { if (fork_ev == asio::execution_context::fork_child) { detail::mutex::scoped_lock lock(mutex_); if (dev_poll_fd_ != -1) ::close(dev_poll_fd_); dev_poll_fd_ = -1; dev_poll_fd_ = do_dev_poll_create(); interrupter_.recreate(); // Add the interrupter's descriptor to /dev/poll. ::pollfd ev = { 0, 0, 0 }; ev.fd = interrupter_.read_descriptor(); ev.events = POLLIN | POLLERR; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Re-register all descriptors with /dev/poll. 
The changes will be written // to the /dev/poll descriptor the next time the reactor is run. for (int i = 0; i < max_ops; ++i) { reactor_op_queue::iterator iter = op_queue_[i].begin(); reactor_op_queue::iterator end = op_queue_[i].end(); for (; iter != end; ++iter) { ::pollfd& pending_ev = add_pending_event_change(iter->first); pending_ev.events |= POLLERR | POLLHUP; switch (i) { case read_op: pending_ev.events |= POLLIN; break; case write_op: pending_ev.events |= POLLOUT; break; case except_op: pending_ev.events |= POLLPRI; break; default: break; } } } interrupter_.interrupt(); } } void dev_poll_reactor::init_task() { scheduler_.init_task(); } int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&) { return 0; } int dev_poll_reactor::register_internal_descriptor(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; switch (op_type) { case read_op: ev.events |= POLLIN; break; case write_op: ev.events |= POLLOUT; break; case except_op: ev.events |= POLLPRI; break; default: break; } interrupter_.interrupt(); return 0; } void dev_poll_reactor::move_descriptor(socket_type, dev_poll_reactor::per_descriptor_data&, dev_poll_reactor::per_descriptor_data&) { } void dev_poll_reactor::start_op(int op_type, socket_type descriptor, dev_poll_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } if (allow_speculative) { if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor)) { if (!op_queue_[op_type].has_operation(descriptor)) { if (op->perform()) { lock.unlock(); scheduler_.post_immediate_completion(op, is_continuation); return; } } } } bool first = 
op_queue_[op_type].enqueue_operation(descriptor, op); scheduler_.work_started(); if (first) { ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLERR | POLLHUP; if (op_type == read_op || op_queue_[read_op].has_operation(descriptor)) ev.events |= POLLIN; if (op_type == write_op || op_queue_[write_op].has_operation(descriptor)) ev.events |= POLLOUT; if (op_type == except_op || op_queue_[except_op].has_operation(descriptor)) ev.events |= POLLPRI; interrupter_.interrupt(); } } void dev_poll_reactor::cancel_ops(socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_descriptor(socket_type descriptor, dev_poll_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. ::pollfd& ev = add_pending_event_change(descriptor); ev.events = POLLREMOVE; interrupter_.interrupt(); // Cancel any outstanding operations associated with the descriptor. cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void dev_poll_reactor::deregister_internal_descriptor( socket_type descriptor, dev_poll_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); // Remove the descriptor from /dev/poll. Since this function is only called // during a fork, we can apply the change immediately. ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); // Destroy all operations associated with the descriptor. 
op_queue ops; asio::error_code ec; for (int i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops, ec); } void dev_poll_reactor::cleanup_descriptor_data( dev_poll_reactor::per_descriptor_data&) { } void dev_poll_reactor::run(long usec, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty() && op_queue_[except_op].empty() && timer_queues_.all_empty()) return; // Write the pending event registration changes to the /dev/poll descriptor. std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size(); if (events_size > 0) { errno = 0; int result = ::write(dev_poll_fd_, &pending_event_changes_[0], events_size); if (result != static_cast(events_size)) { asio::error_code ec = asio::error_code( errno, asio::error::get_system_category()); for (std::size_t i = 0; i < pending_event_changes_.size(); ++i) { int descriptor = pending_event_changes_[i].fd; for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } pending_event_changes_.clear(); pending_event_change_index_.clear(); } // Calculate timeout. int timeout; if (usec == 0) timeout = 0; else { timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1); timeout = get_timeout(timeout); } lock.unlock(); // Block on the /dev/poll descriptor. ::pollfd events[128] = { { 0, 0, 0 } }; ::dvpoll dp = { 0, 0, 0 }; dp.dp_fds = events; dp.dp_nfds = 128; dp.dp_timeout = timeout; int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp); lock.lock(); // Dispatch the waiting events. 
for (int i = 0; i < num_events; ++i) { int descriptor = events[i].fd; if (descriptor == interrupter_.read_descriptor()) { interrupter_.reset(); } else { bool more_reads = false; bool more_writes = false; bool more_except = false; // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. if (events[i].events & (POLLPRI | POLLERR | POLLHUP)) more_except = op_queue_[except_op].perform_operations(descriptor, ops); else more_except = op_queue_[except_op].has_operation(descriptor); if (events[i].events & (POLLIN | POLLERR | POLLHUP)) more_reads = op_queue_[read_op].perform_operations(descriptor, ops); else more_reads = op_queue_[read_op].has_operation(descriptor); if (events[i].events & (POLLOUT | POLLERR | POLLHUP)) more_writes = op_queue_[write_op].perform_operations(descriptor, ops); else more_writes = op_queue_[write_op].has_operation(descriptor); if ((events[i].events & (POLLERR | POLLHUP)) != 0 && !more_except && !more_reads && !more_writes) { // If we have an event and no operations associated with the // descriptor then we need to delete the descriptor from /dev/poll. // The poll operation can produce POLLHUP or POLLERR events when there // is no operation pending, so if we do not remove the descriptor we // can end up in a tight polling loop. 
::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLREMOVE; ev.revents = 0; ::write(dev_poll_fd_, &ev, sizeof(ev)); } else { ::pollfd ev = { 0, 0, 0 }; ev.fd = descriptor; ev.events = POLLERR | POLLHUP; if (more_reads) ev.events |= POLLIN; if (more_writes) ev.events |= POLLOUT; if (more_except) ev.events |= POLLPRI; ev.revents = 0; int result = ::write(dev_poll_fd_, &ev, sizeof(ev)); if (result != sizeof(ev)) { asio::error_code ec(errno, asio::error::get_system_category()); for (int j = 0; j < max_ops; ++j) op_queue_[j].cancel_operations(descriptor, ops, ec); } } } } timer_queues_.get_ready_timers(ops); } void dev_poll_reactor::interrupt() { interrupter_.interrupt(); } int dev_poll_reactor::do_dev_poll_create() { int fd = ::open("/dev/poll", O_RDWR); if (fd == -1) { asio::error_code ec(errno, asio::error::get_system_category()); asio::detail::throw_error(ec, "/dev/poll"); } return fd; } void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } int dev_poll_reactor::get_timeout(int msec) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. const int max_msec = 5 * 60 * 1000; return timer_queues_.wait_duration_msec( (msec < 0 || max_msec < msec) ? 
max_msec : msec); } void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; scheduler_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } ::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor) { hash_map::iterator iter = pending_event_change_index_.find(descriptor); if (iter == pending_event_change_index_.end()) { std::size_t index = pending_event_changes_.size(); pending_event_changes_.reserve(pending_event_changes_.size() + 1); pending_event_change_index_.insert(std::make_pair(descriptor, index)); pending_event_changes_.push_back(::pollfd()); pending_event_changes_[index].fd = descriptor; pending_event_changes_[index].revents = 0; return pending_event_changes_[index]; } else { return pending_event_changes_[iter->second]; } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP galera-4-26.4.25/asio/asio/detail/impl/select_reactor.ipp000644 000164 177776 00000021260 15107057155 024316 0ustar00jenkinsnogroup000000 000000 // // detail/impl/select_reactor.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/select_reactor.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) class select_reactor::thread_function { public: explicit thread_function(select_reactor* r) : this_(r) { } void operator()() { this_->run_thread(); } private: select_reactor* this_; }; #endif // defined(ASIO_HAS_IOCP) select_reactor::select_reactor(asio::execution_context& ctx) : execution_context_service_base(ctx), scheduler_(use_service(ctx)), mutex_(), interrupter_(), #if defined(ASIO_HAS_IOCP) stop_thread_(false), thread_(0), #endif // defined(ASIO_HAS_IOCP) shutdown_(false) { #if defined(ASIO_HAS_IOCP) asio::detail::signal_blocker sb; thread_ = new asio::detail::thread(thread_function(this)); #endif // defined(ASIO_HAS_IOCP) } select_reactor::~select_reactor() { shutdown(); } void select_reactor::shutdown() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; #if defined(ASIO_HAS_IOCP) stop_thread_ = true; #endif // defined(ASIO_HAS_IOCP) lock.unlock(); #if defined(ASIO_HAS_IOCP) if (thread_) { interrupter_.interrupt(); thread_->join(); delete thread_; thread_ = 0; } #endif // defined(ASIO_HAS_IOCP) op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].get_all_operations(ops); timer_queues_.get_all_timers(ops); scheduler_.abandon_operations(ops); } void select_reactor::notify_fork( 
asio::execution_context::fork_event fork_ev) { if (fork_ev == asio::execution_context::fork_child) interrupter_.recreate(); } void select_reactor::init_task() { scheduler_.init_task(); } int select_reactor::register_descriptor(socket_type, select_reactor::per_descriptor_data&) { return 0; } int select_reactor::register_internal_descriptor( int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue_[op_type].enqueue_operation(descriptor, op); interrupter_.interrupt(); return 0; } void select_reactor::move_descriptor(socket_type, select_reactor::per_descriptor_data&, select_reactor::per_descriptor_data&) { } void select_reactor::start_op(int op_type, socket_type descriptor, select_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation, bool) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { post_immediate_completion(op, is_continuation); return; } bool first = op_queue_[op_type].enqueue_operation(descriptor, op); scheduler_.work_started(); if (first) interrupter_.interrupt(); } void select_reactor::cancel_ops(socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void select_reactor::deregister_descriptor(socket_type descriptor, select_reactor::per_descriptor_data&, bool) { asio::detail::mutex::scoped_lock lock(mutex_); cancel_ops_unlocked(descriptor, asio::error::operation_aborted); } void select_reactor::deregister_internal_descriptor( socket_type descriptor, select_reactor::per_descriptor_data&) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; for (int i = 0; i < max_ops; ++i) op_queue_[i].cancel_operations(descriptor, ops); } void select_reactor::cleanup_descriptor_data( select_reactor::per_descriptor_data&) { } void select_reactor::run(long usec, op_queue& ops) { asio::detail::mutex::scoped_lock lock(mutex_); 
#if defined(ASIO_HAS_IOCP) // Check if the thread is supposed to stop. if (stop_thread_) return; #endif // defined(ASIO_HAS_IOCP) // Set up the descriptor sets. for (int i = 0; i < max_select_ops; ++i) fd_sets_[i].reset(); fd_sets_[read_op].set(interrupter_.read_descriptor()); socket_type max_fd = 0; bool have_work_to_do = !timer_queues_.all_empty(); for (int i = 0; i < max_select_ops; ++i) { have_work_to_do = have_work_to_do || !op_queue_[i].empty(); fd_sets_[i].set(op_queue_[i], ops); if (fd_sets_[i].max_descriptor() > max_fd) max_fd = fd_sets_[i].max_descriptor(); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty(); fd_sets_[write_op].set(op_queue_[connect_op], ops); if (fd_sets_[write_op].max_descriptor() > max_fd) max_fd = fd_sets_[write_op].max_descriptor(); fd_sets_[except_op].set(op_queue_[connect_op], ops); if (fd_sets_[except_op].max_descriptor() > max_fd) max_fd = fd_sets_[except_op].max_descriptor(); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // We can return immediately if there's no work to do and the reactor is // not supposed to block. if (!usec && !have_work_to_do) return; // Determine how long to block while waiting for events. timeval tv_buf = { 0, 0 }; timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf; lock.unlock(); // Block on the select call until descriptors become ready. asio::error_code ec; int retval = socket_ops::select(static_cast(max_fd + 1), fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec); // Reset the interrupter. if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor())) { interrupter_.reset(); --retval; } lock.lock(); // Dispatch all ready operations. if (retval > 0) { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Connection operations on Windows use both except and write fd_sets. 
fd_sets_[except_op].perform(op_queue_[connect_op], ops); fd_sets_[write_op].perform(op_queue_[connect_op], ops); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Exception operations must be processed first to ensure that any // out-of-band data is read before normal data. for (int i = max_select_ops - 1; i >= 0; --i) fd_sets_[i].perform(op_queue_[i], ops); } timer_queues_.get_ready_timers(ops); } void select_reactor::interrupt() { interrupter_.interrupt(); } #if defined(ASIO_HAS_IOCP) void select_reactor::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { lock.unlock(); op_queue ops; run(true, ops); scheduler_.post_deferred_completions(ops); lock.lock(); } } #endif // defined(ASIO_HAS_IOCP) void select_reactor::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void select_reactor::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } timeval* select_reactor::get_timeout(long usec, timeval& tv) { // By default we will wait no longer than 5 minutes. This will ensure that // any changes to the system clock are detected after no longer than this. const long max_usec = 5 * 60 * 1000 * 1000; usec = timer_queues_.wait_duration_usec( (usec < 0 || max_usec < usec) ? 
max_usec : usec); tv.tv_sec = usec / 1000000; tv.tv_usec = usec % 1000000; return &tv; } void select_reactor::cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec) { bool need_interrupt = false; op_queue ops; for (int i = 0; i < max_ops; ++i) need_interrupt = op_queue_[i].cancel_operations( descriptor, ops, ec) || need_interrupt; scheduler_.post_deferred_completions(ops); if (need_interrupt) interrupter_.interrupt(); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE)) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP galera-4-26.4.25/asio/asio/detail/impl/winsock_init.ipp000644 000164 177776 00000003574 15107057155 024030 0ustar00jenkinsnogroup000000 000000 // // detail/impl/winsock_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void winsock_init_base::startup(data& d, unsigned char major, unsigned char minor) { if (::InterlockedIncrement(&d.init_count_) == 1) { WSADATA wsa_data; long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data); ::InterlockedExchange(&d.result_, result); } } void winsock_init_base::manual_startup(data& d) { if (::InterlockedIncrement(&d.init_count_) == 1) { ::InterlockedExchange(&d.result_, 0); } } void winsock_init_base::cleanup(data& d) { if (::InterlockedDecrement(&d.init_count_) == 0) { ::WSACleanup(); } } void winsock_init_base::manual_cleanup(data& d) { ::InterlockedDecrement(&d.init_count_); } void winsock_init_base::throw_on_error(data& d) { long result = ::InterlockedExchangeAdd(&d.result_, 0); if (result != 0) { asio::error_code ec(result, asio::error::get_system_category()); asio::detail::throw_error(ec, "winsock"); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP galera-4-26.4.25/asio/asio/detail/impl/throw_error.ipp000644 000164 177776 00000003512 15107057155 023674 0ustar00jenkinsnogroup000000 000000 // // detail/impl/throw_error.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP #define ASIO_DETAIL_IMPL_THROW_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void do_throw_error(const asio::error_code& err) { asio::system_error e(err); asio::detail::throw_exception(e); } void do_throw_error(const asio::error_code& err, const char* location) { // boostify: non-boost code starts here #if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // Microsoft's implementation of std::system_error is non-conformant in that // it ignores the error code's message when a "what" string is supplied. We'll // work around this by explicitly formatting the "what" string. std::string what_msg = location; what_msg += ": "; what_msg += err.message(); asio::system_error e(err, what_msg); asio::detail::throw_exception(e); #else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here asio::system_error e(err, location); asio::detail::throw_exception(e); // boostify: non-boost code starts here #endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR) // boostify: non-boost code ends here } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP galera-4-26.4.25/asio/asio/detail/impl/win_iocp_io_context.hpp000644 000164 177776 00000005526 15107057155 025370 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_iocp_io_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/completion_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void win_iocp_io_context::add_timer_queue( timer_queue& queue) { do_add_timer_queue(queue); } template void win_iocp_io_context::remove_timer_queue( timer_queue& queue) { do_remove_timer_queue(queue); } template void win_iocp_io_context::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { // If the service has been shut down we silently discard the timer. if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) { post_immediate_completion(op, false); return; } mutex::scoped_lock lock(dispatch_mutex_); bool earliest = queue.enqueue_timer(time, timer, op); work_started(); if (earliest) update_timeout(); } template std::size_t win_iocp_io_context::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { // If the service has been shut down we silently ignore the cancellation. 
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0) return 0; mutex::scoped_lock lock(dispatch_mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); post_deferred_completions(ops); return n; } template void win_iocp_io_context::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& to, typename timer_queue::per_timer_data& from) { asio::detail::mutex::scoped_lock lock(dispatch_mutex_); op_queue ops; queue.cancel_timer(to, ops); queue.move_timer(to, from); lock.unlock(); post_deferred_completions(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP galera-4-26.4.25/asio/asio/detail/impl/win_thread.ipp000644 000164 177776 00000007514 15107057155 023452 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP #define ASIO_DETAIL_IMPL_WIN_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_APP) \ && !defined(UNDER_CE) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_thread.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_thread::~win_thread() { ::CloseHandle(thread_); // The exit_event_ handle is deliberately allowed to leak here since it // is an error for the owner of an internal thread not to join() it. 
} void win_thread::join() { HANDLE handles[2] = { exit_event_, thread_ }; ::WaitForMultipleObjects(2, handles, FALSE, INFINITE); ::CloseHandle(exit_event_); if (terminate_threads()) { ::TerminateThread(thread_, 0); } else { ::QueueUserAPC(apc_function, thread_, 0); ::WaitForSingleObject(thread_, INFINITE); } } std::size_t win_thread::hardware_concurrency() { SYSTEM_INFO system_info; ::GetSystemInfo(&system_info); return system_info.dwNumberOfProcessors; } void win_thread::start_thread(func_base* arg, unsigned int stack_size) { ::HANDLE entry_event = 0; arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0); if (!entry_event) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.entry_event"); } arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0); if (!exit_event_) { DWORD last_error = ::GetLastError(); delete arg; asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread.exit_event"); } unsigned int thread_id = 0; thread_ = reinterpret_cast(::_beginthreadex(0, stack_size, win_thread_function, arg, 0, &thread_id)); if (!thread_) { DWORD last_error = ::GetLastError(); delete arg; if (entry_event) ::CloseHandle(entry_event); if (exit_event_) ::CloseHandle(exit_event_); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } if (entry_event) { ::WaitForSingleObject(entry_event, INFINITE); ::CloseHandle(entry_event); } } unsigned int __stdcall win_thread_function(void* arg) { win_thread::auto_func_base_ptr func = { static_cast(arg) }; ::SetEvent(func.ptr->entry_event_); func.ptr->run(); // Signal that the thread has finished its work, but rather than returning go // to sleep to put the thread into a well known state. 
If the thread is being // joined during global object destruction then it may be killed using // TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx // call will be interrupted using QueueUserAPC and the thread will shut down // cleanly. HANDLE exit_event = func.ptr->exit_event_; delete func.ptr; func.ptr = 0; ::SetEvent(exit_event); ::SleepEx(INFINITE, TRUE); return 0; } #if defined(WINVER) && (WINVER < 0x0500) void __stdcall apc_function(ULONG) {} #else void __stdcall apc_function(ULONG_PTR) {} #endif } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_APP) // && !defined(UNDER_CE) #endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP galera-4-26.4.25/asio/asio/detail/impl/win_mutex.ipp000644 000164 177776 00000004031 15107057155 023334 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_mutex::win_mutex() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "mutex"); } int win_mutex::do_init() { #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. 
# if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) return ::GetLastError(); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif return 0; #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) return ::GetLastError(); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) return ::GetLastError(); # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { return ERROR_OUTOFMEMORY; } return 0; #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP galera-4-26.4.25/asio/asio/detail/impl/service_registry.hpp000644 000164 177776 00000005013 15107057155 024705 0ustar00jenkinsnogroup000000 000000 // // detail/impl/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template Service& service_registry::use_service() { execution_context::service::key key; init_key(key, 0); factory_type factory = &service_registry::create; return *static_cast(do_use_service(key, factory, &owner_)); } template Service& service_registry::use_service(io_context& owner) { execution_context::service::key key; init_key(key, 0); factory_type factory = &service_registry::create; return *static_cast(do_use_service(key, factory, &owner)); } template void service_registry::add_service(Service* new_service) { execution_context::service::key key; init_key(key, 0); return do_add_service(key, new_service); } template bool service_registry::has_service() const { execution_context::service::key key; init_key(key, 0); return do_has_service(key); } template inline void service_registry::init_key( execution_context::service::key& key, ...) 
{ init_key_from_id(key, Service::id); } #if !defined(ASIO_NO_TYPEID) template void service_registry::init_key(execution_context::service::key& key, typename enable_if< is_base_of::value>::type*) { key.type_info_ = &typeid(typeid_wrapper); key.id_ = 0; } template void service_registry::init_key_from_id(execution_context::service::key& key, const service_id& /*id*/) { key.type_info_ = &typeid(typeid_wrapper); key.id_ = 0; } #endif // !defined(ASIO_NO_TYPEID) template execution_context::service* service_registry::create(void* owner) { return new Service(*static_cast(owner)); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP galera-4-26.4.25/asio/asio/detail/impl/timer_queue_ptime.ipp000644 000164 177776 00000004415 15107057155 025045 0ustar00jenkinsnogroup000000 000000 // // detail/impl/timer_queue_ptime.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/detail/timer_queue_ptime.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { timer_queue >::timer_queue() { } timer_queue >::~timer_queue() { } bool timer_queue >::enqueue_timer( const time_type& time, per_timer_data& timer, wait_op* op) { return impl_.enqueue_timer(time, timer, op); } bool timer_queue >::empty() const { return impl_.empty(); } long timer_queue >::wait_duration_msec( long max_duration) const { return impl_.wait_duration_msec(max_duration); } long timer_queue >::wait_duration_usec( long max_duration) const { return impl_.wait_duration_usec(max_duration); } void timer_queue >::get_ready_timers( op_queue& ops) { impl_.get_ready_timers(ops); } void timer_queue >::get_all_timers( op_queue& ops) { impl_.get_all_timers(ops); } std::size_t timer_queue >::cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled) { return impl_.cancel_timer(timer, ops, max_cancelled); } void timer_queue >::move_timer( per_timer_data& target, per_timer_data& source) { impl_.move_timer(target, source); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP galera-4-26.4.25/asio/asio/detail/impl/posix_tss_ptr.ipp000644 000164 177776 00000002155 15107057155 024242 0ustar00jenkinsnogroup000000 000000 // // detail/impl/posix_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_tss_ptr.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void posix_tss_ptr_create(pthread_key_t& key) { int error = ::pthread_key_create(&key, 0); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP galera-4-26.4.25/asio/asio/detail/impl/posix_thread.ipp000644 000164 177776 00000003440 15107057155 024011 0ustar00jenkinsnogroup000000 000000 // // detail/impl/posix_thread.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include "asio/detail/posix_thread.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { posix_thread::~posix_thread() { if (!joined_) ::pthread_detach(thread_); } void posix_thread::join() { if (!joined_) { ::pthread_join(thread_, 0); joined_ = true; } } std::size_t posix_thread::hardware_concurrency() { #if defined(_SC_NPROCESSORS_ONLN) long result = sysconf(_SC_NPROCESSORS_ONLN); if (result > 0) return result; #endif // defined(_SC_NPROCESSORS_ONLN) return 0; } void posix_thread::start_thread(func_base* arg) { int error = ::pthread_create(&thread_, 0, asio_detail_posix_thread_function, arg); if (error != 0) { delete arg; asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } } void* asio_detail_posix_thread_function(void* arg) { posix_thread::auto_func_base_ptr func = { static_cast(arg) }; func.ptr->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP galera-4-26.4.25/asio/asio/detail/impl/null_event.ipp000644 000164 177776 00000003375 15107057155 023502 0ustar00jenkinsnogroup000000 000000 // // detail/impl/null_event.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP #define ASIO_DETAIL_IMPL_NULL_EVENT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/socket_types.hpp" #else # include # if defined(__hpux) # include # endif # if !defined(__hpux) || defined(__SELECT) # include # endif #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void null_event::do_wait() { #if defined(ASIO_WINDOWS_RUNTIME) std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)()); #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) ::Sleep(INFINITE); #else ::pause(); #endif } void null_event::do_wait_for_usec(long usec) { #if defined(ASIO_WINDOWS_RUNTIME) std::this_thread::sleep_for(std::chrono::microseconds(usec)); #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) ::Sleep(usec / 1000); #elif defined(__hpux) && defined(__SELECT) timespec ts; ts.tv_sec = usec / 1000000; ts.tv_nsec = (usec % 1000000) * 1000; ::pselect(0, 0, 0, 0, &ts, 0); #else timeval tv; tv.tv_sec = usec / 1000000; tv.tv_usec = usec % 1000000; ::select(0, 0, 0, 0, &tv); #endif } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP galera-4-26.4.25/asio/asio/detail/impl/scheduler.ipp000644 000164 177776 00000034020 15107057155 023274 0ustar00jenkinsnogroup000000 000000 // // detail/impl/scheduler.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP #define ASIO_DETAIL_IMPL_SCHEDULER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/concurrency_hint.hpp" #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/scheduler.hpp" #include "asio/detail/scheduler_thread_info.hpp" #include "asio/detail/signal_blocker.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class scheduler::thread_function { public: explicit thread_function(scheduler* s) : this_(s) { } void operator()() { asio::error_code ec; this_->run(ec); } private: scheduler* this_; }; struct scheduler::task_cleanup { ~task_cleanup() { if (this_thread_->private_outstanding_work > 0) { asio::detail::increment( scheduler_->outstanding_work_, this_thread_->private_outstanding_work); } this_thread_->private_outstanding_work = 0; // Enqueue the completed operations and reinsert the task at the end of // the operation queue. 
lock_->lock(); scheduler_->task_interrupted_ = true; scheduler_->op_queue_.push(this_thread_->private_op_queue); scheduler_->op_queue_.push(&scheduler_->task_operation_); } scheduler* scheduler_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; struct scheduler::work_cleanup { ~work_cleanup() { if (this_thread_->private_outstanding_work > 1) { asio::detail::increment( scheduler_->outstanding_work_, this_thread_->private_outstanding_work - 1); } else if (this_thread_->private_outstanding_work < 1) { scheduler_->work_finished(); } this_thread_->private_outstanding_work = 0; #if defined(ASIO_HAS_THREADS) if (!this_thread_->private_op_queue.empty()) { lock_->lock(); scheduler_->op_queue_.push(this_thread_->private_op_queue); } #endif // defined(ASIO_HAS_THREADS) } scheduler* scheduler_; mutex::scoped_lock* lock_; thread_info* this_thread_; }; scheduler::scheduler(asio::execution_context& ctx, int concurrency_hint, bool own_thread) : asio::detail::execution_context_service_base(ctx), one_thread_(concurrency_hint == 1 || !ASIO_CONCURRENCY_HINT_IS_LOCKING( SCHEDULER, concurrency_hint) || !ASIO_CONCURRENCY_HINT_IS_LOCKING( REACTOR_IO, concurrency_hint)), mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING( SCHEDULER, concurrency_hint)), task_(0), task_interrupted_(true), outstanding_work_(0), stopped_(false), shutdown_(false), concurrency_hint_(concurrency_hint), thread_(0) { ASIO_HANDLER_TRACKING_INIT; if (own_thread) { ++outstanding_work_; asio::detail::signal_blocker sb; thread_ = new asio::detail::thread(thread_function(this)); } } scheduler::~scheduler() { if (thread_) { thread_->join(); delete thread_; } } void scheduler::shutdown() { mutex::scoped_lock lock(mutex_); shutdown_ = true; if (thread_) stop_all_threads(lock); lock.unlock(); // Join thread to ensure task operation is returned to queue. if (thread_) { thread_->join(); delete thread_; thread_ = 0; } // Destroy handler objects. 
while (!op_queue_.empty()) { operation* o = op_queue_.front(); op_queue_.pop(); if (o != &task_operation_) o->destroy(); } // Reset to initial state. task_ = 0; } void scheduler::init_task() { mutex::scoped_lock lock(mutex_); if (!shutdown_ && !task_) { task_ = &use_service(this->context()); op_queue_.push(&task_operation_); wake_one_thread_and_unlock(lock); } } std::size_t scheduler::run(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); std::size_t n = 0; for (; do_run_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t scheduler::run_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); return do_run_one(lock, this_thread, ec); } std::size_t scheduler::wait_one(long usec, asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); return do_wait_one(lock, this_thread, usec, ec); } std::size_t scheduler::poll(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. 
if (one_thread_) if (thread_info* outer_info = static_cast(ctx.next_by_key())) op_queue_.push(outer_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) std::size_t n = 0; for (; do_poll_one(lock, this_thread, ec); lock.lock()) if (n != (std::numeric_limits::max)()) ++n; return n; } std::size_t scheduler::poll_one(asio::error_code& ec) { ec = asio::error_code(); if (outstanding_work_ == 0) { stop(); return 0; } thread_info this_thread; this_thread.private_outstanding_work = 0; thread_call_stack::context ctx(this, this_thread); mutex::scoped_lock lock(mutex_); #if defined(ASIO_HAS_THREADS) // We want to support nested calls to poll() and poll_one(), so any handlers // that are already on a thread-private queue need to be put on to the main // queue now. if (one_thread_) if (thread_info* outer_info = static_cast(ctx.next_by_key())) op_queue_.push(outer_info->private_op_queue); #endif // defined(ASIO_HAS_THREADS) return do_poll_one(lock, this_thread, ec); } void scheduler::stop() { mutex::scoped_lock lock(mutex_); stop_all_threads(lock); } bool scheduler::stopped() const { mutex::scoped_lock lock(mutex_); return stopped_; } void scheduler::restart() { mutex::scoped_lock lock(mutex_); stopped_ = false; } void scheduler::compensating_work_started() { thread_info_base* this_thread = thread_call_stack::contains(this); ++static_cast(this_thread)->private_outstanding_work; } void scheduler::post_immediate_completion( scheduler::operation* op, bool is_continuation) { #if defined(ASIO_HAS_THREADS) if (one_thread_ || is_continuation) { if (thread_info_base* this_thread = thread_call_stack::contains(this)) { ++static_cast(this_thread)->private_outstanding_work; static_cast(this_thread)->private_op_queue.push(op); return; } } #else // defined(ASIO_HAS_THREADS) (void)is_continuation; #endif // defined(ASIO_HAS_THREADS) work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void 
scheduler::post_deferred_completion(scheduler::operation* op) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info_base* this_thread = thread_call_stack::contains(this)) { static_cast(this_thread)->private_op_queue.push(op); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void scheduler::post_deferred_completions( op_queue& ops) { if (!ops.empty()) { #if defined(ASIO_HAS_THREADS) if (one_thread_) { if (thread_info_base* this_thread = thread_call_stack::contains(this)) { static_cast(this_thread)->private_op_queue.push(ops); return; } } #endif // defined(ASIO_HAS_THREADS) mutex::scoped_lock lock(mutex_); op_queue_.push(ops); wake_one_thread_and_unlock(lock); } } void scheduler::do_dispatch( scheduler::operation* op) { work_started(); mutex::scoped_lock lock(mutex_); op_queue_.push(op); wake_one_thread_and_unlock(lock); } void scheduler::abandon_operations( op_queue& ops) { op_queue ops2; ops2.push(ops); } std::size_t scheduler::do_run_one(mutex::scoped_lock& lock, scheduler::thread_info& this_thread, const asio::error_code& ec) { while (!stopped_) { if (!op_queue_.empty()) { // Prepare to execute first handler from queue. operation* o = op_queue_.front(); op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); if (o == &task_operation_) { task_interrupted_ = more_handlers; if (more_handlers && !one_thread_) wakeup_event_.unlock_and_signal_one(lock); else lock.unlock(); task_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(more_handlers ? 
0 : -1, this_thread.private_op_queue); } else { std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. o->complete(this, ec, task_result); return 1; } } else { wakeup_event_.clear(lock); wakeup_event_.wait(lock); } } return 0; } std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock, scheduler::thread_info& this_thread, long usec, const asio::error_code& ec) { if (stopped_) return 0; operation* o = op_queue_.front(); if (o == 0) { wakeup_event_.clear(lock); wakeup_event_.wait_for_usec(lock, usec); usec = 0; // Wait at most once. o = op_queue_.front(); } if (o == &task_operation_) { op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); task_interrupted_ = more_handlers; if (more_handlers && !one_thread_) wakeup_event_.unlock_and_signal_one(lock); else lock.unlock(); { task_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue); } o = op_queue_.front(); if (o == &task_operation_) { if (!one_thread_) wakeup_event_.maybe_unlock_and_signal_one(lock); return 0; } } if (o == 0) return 0; op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. 
o->complete(this, ec, task_result); return 1; } std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock, scheduler::thread_info& this_thread, const asio::error_code& ec) { if (stopped_) return 0; operation* o = op_queue_.front(); if (o == &task_operation_) { op_queue_.pop(); lock.unlock(); { task_cleanup c = { this, &lock, &this_thread }; (void)c; // Run the task. May throw an exception. Only block if the operation // queue is empty and we're not polling, otherwise we want to return // as soon as possible. task_->run(0, this_thread.private_op_queue); } o = op_queue_.front(); if (o == &task_operation_) { wakeup_event_.maybe_unlock_and_signal_one(lock); return 0; } } if (o == 0) return 0; op_queue_.pop(); bool more_handlers = (!op_queue_.empty()); std::size_t task_result = o->task_result_; if (more_handlers && !one_thread_) wake_one_thread_and_unlock(lock); else lock.unlock(); // Ensure the count of outstanding work is decremented on block exit. work_cleanup on_exit = { this, &lock, &this_thread }; (void)on_exit; // Complete the operation. May throw an exception. Deletes the object. o->complete(this, ec, task_result); return 1; } void scheduler::stop_all_threads( mutex::scoped_lock& lock) { stopped_ = true; wakeup_event_.signal_all(lock); if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } } void scheduler::wake_one_thread_and_unlock( mutex::scoped_lock& lock) { if (!wakeup_event_.maybe_unlock_and_signal_one(lock)) { if (!task_interrupted_ && task_) { task_interrupted_ = true; task_->interrupt(); } lock.unlock(); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP galera-4-26.4.25/asio/asio/detail/impl/win_object_handle_service.ipp000644 000164 177776 00000027561 15107057155 026510 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_object_handle_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/win_object_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { win_object_handle_service::win_object_handle_service(execution_context& context) : execution_context_service_base(context), scheduler_(asio::use_service(context)), mutex_(), impl_list_(0), shutdown_(false) { } void win_object_handle_service::shutdown() { mutex::scoped_lock lock(mutex_); // Setting this flag to true prevents new objects from being registered, and // new asynchronous wait operations from being started. We only need to worry // about cleaning up the operations that are currently in progress. shutdown_ = true; op_queue ops; for (implementation_type* impl = impl_list_; impl; impl = impl->next_) ops.push(impl->op_queue_); lock.unlock(); scheduler_.abandon_operations(ops); } void win_object_handle_service::construct( win_object_handle_service::implementation_type& impl) { impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.owner_ = this; // Insert implementation into linked list of all implementations. 
mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } } void win_object_handle_service::move_construct( win_object_handle_service::implementation_type& impl, win_object_handle_service::implementation_type& other_impl) { mutex::scoped_lock lock(mutex_); // Insert implementation into linked list of all implementations. if (!shutdown_) { impl.next_ = impl_list_; impl.prev_ = 0; if (impl_list_) impl_list_->prev_ = &impl; impl_list_ = &impl; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::move_assign( win_object_handle_service::implementation_type& impl, win_object_handle_service& other_service, win_object_handle_service::implementation_type& other_impl) { asio::error_code ignored_ec; close(impl, ignored_ec); mutex::scoped_lock lock(mutex_); if (this != &other_service) { // Remove implementation from linked list of all implementations. 
if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; } impl.handle_ = other_impl.handle_; other_impl.handle_ = INVALID_HANDLE_VALUE; impl.wait_handle_ = other_impl.wait_handle_; other_impl.wait_handle_ = INVALID_HANDLE_VALUE; impl.op_queue_.push(other_impl.op_queue_); impl.owner_ = this; if (this != &other_service) { // Insert implementation into linked list of all implementations. impl.next_ = other_service.impl_list_; impl.prev_ = 0; if (other_service.impl_list_) other_service.impl_list_->prev_ = &impl; other_service.impl_list_ = &impl; } // We must not hold the lock while calling UnregisterWaitEx. This is because // the registered callback function might be invoked while we are waiting for // UnregisterWaitEx to complete. lock.unlock(); if (impl.wait_handle_ != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE); if (!impl.op_queue_.empty()) register_wait_callback(impl, lock); } void win_object_handle_service::destroy( win_object_handle_service::implementation_type& impl) { mutex::scoped_lock lock(mutex_); // Remove implementation from linked list of all implementations. if (impl_list_ == &impl) impl_list_ = impl.next_; if (impl.prev_) impl.prev_->next_ = impl.next_; if (impl.next_) impl.next_->prev_= impl.prev_; impl.next_ = 0; impl.prev_ = 0; if (is_open(impl)) { ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle", &impl, reinterpret_cast(impl.wait_handle_), "close")); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ::CloseHandle(impl.handle_); impl.handle_ = INVALID_HANDLE_VALUE; scheduler_.post_deferred_completions(ops); } } asio::error_code win_object_handle_service::assign( win_object_handle_service::implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.handle_ = handle; ec = asio::error_code(); return ec; } asio::error_code win_object_handle_service::close( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle", &impl, reinterpret_cast(impl.wait_handle_), "close")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { impl.op_queue_.pop(); op->ec_ = asio::error::operation_aborted; completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); if (::CloseHandle(impl.handle_)) { impl.handle_ = INVALID_HANDLE_VALUE; ec = asio::error_code(); } else { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } scheduler_.post_deferred_completions(completed_ops); } else { ec = asio::error_code(); } return ec; } asio::error_code win_object_handle_service::cancel( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { if (is_open(impl)) { ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle", &impl, reinterpret_cast(impl.wait_handle_), "cancel")); mutex::scoped_lock lock(mutex_); HANDLE wait_handle = impl.wait_handle_; impl.wait_handle_ = INVALID_HANDLE_VALUE; op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = asio::error::operation_aborted; impl.op_queue_.pop(); completed_ops.push(op); } // We must not hold the lock while calling UnregisterWaitEx. This is // because the registered callback function might be invoked while we are // waiting for UnregisterWaitEx to complete. 
lock.unlock(); if (wait_handle != INVALID_HANDLE_VALUE) ::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE); ec = asio::error_code(); scheduler_.post_deferred_completions(completed_ops); } else { ec = asio::error::bad_descriptor; } return ec; } void win_object_handle_service::wait( win_object_handle_service::implementation_type& impl, asio::error_code& ec) { switch (::WaitForSingleObject(impl.handle_, INFINITE)) { case WAIT_FAILED: { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); break; } case WAIT_OBJECT_0: case WAIT_ABANDONED: default: ec = asio::error_code(); break; } } void win_object_handle_service::start_wait_op( win_object_handle_service::implementation_type& impl, wait_op* op) { scheduler_.work_started(); if (is_open(impl)) { mutex::scoped_lock lock(mutex_); if (!shutdown_) { impl.op_queue_.push(op); // Only the first operation to be queued gets to register a wait callback. // Subsequent operations have to wait for the first to finish. 
if (impl.op_queue_.front() == op) register_wait_callback(impl, lock); } else { lock.unlock(); scheduler_.post_deferred_completion(op); } } else { op->ec_ = asio::error::bad_descriptor; scheduler_.post_deferred_completion(op); } } void win_object_handle_service::register_wait_callback( win_object_handle_service::implementation_type& impl, mutex::scoped_lock& lock) { lock.lock(); if (!RegisterWaitForSingleObject(&impl.wait_handle_, impl.handle_, &win_object_handle_service::wait_callback, &impl, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); op_queue completed_ops; while (wait_op* op = impl.op_queue_.front()) { op->ec_ = ec; impl.op_queue_.pop(); completed_ops.push(op); } lock.unlock(); scheduler_.post_deferred_completions(completed_ops); } } void win_object_handle_service::wait_callback(PVOID param, BOOLEAN) { implementation_type* impl = static_cast(param); mutex::scoped_lock lock(impl->owner_->mutex_); if (impl->wait_handle_ != INVALID_HANDLE_VALUE) { ::UnregisterWaitEx(impl->wait_handle_, NULL); impl->wait_handle_ = INVALID_HANDLE_VALUE; } if (wait_op* op = impl->op_queue_.front()) { op_queue completed_ops; op->ec_ = asio::error_code(); impl->op_queue_.pop(); completed_ops.push(op); if (!impl->op_queue_.empty()) { if (!RegisterWaitForSingleObject(&impl->wait_handle_, impl->handle_, &win_object_handle_service::wait_callback, param, INFINITE, WT_EXECUTEONLYONCE)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); while ((op = impl->op_queue_.front()) != 0) { op->ec_ = ec; impl->op_queue_.pop(); completed_ops.push(op); } } } scheduler_impl& sched = impl->owner_->scheduler_; lock.unlock(); sched.post_deferred_completions(completed_ops); } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // 
ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/winrt_timer_scheduler.ipp000644 000164 177776 00000005432 15107057155 025724 0ustar00jenkinsnogroup000000 000000 // // detail/impl/winrt_timer_scheduler.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/winrt_timer_scheduler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { winrt_timer_scheduler::winrt_timer_scheduler(execution_context& context) : execution_context_service_base(context), scheduler_(use_service(context)), mutex_(), event_(), timer_queues_(), thread_(0), stop_thread_(false), shutdown_(false) { thread_ = new asio::detail::thread( bind_handler(&winrt_timer_scheduler::call_run_thread, this)); } winrt_timer_scheduler::~winrt_timer_scheduler() { shutdown(); } void winrt_timer_scheduler::shutdown() { asio::detail::mutex::scoped_lock lock(mutex_); shutdown_ = true; stop_thread_ = true; event_.signal(lock); lock.unlock(); if (thread_) { thread_->join(); delete thread_; thread_ = 0; } op_queue ops; timer_queues_.get_all_timers(ops); scheduler_.abandon_operations(ops); } void winrt_timer_scheduler::notify_fork(execution_context::fork_event) { } void winrt_timer_scheduler::init_task() { } void winrt_timer_scheduler::run_thread() { asio::detail::mutex::scoped_lock lock(mutex_); while (!stop_thread_) { const long max_wait_duration = 5 * 60 * 1000000; long wait_duration = 
timer_queues_.wait_duration_usec(max_wait_duration); event_.wait_for_usec(lock, wait_duration); event_.clear(lock); op_queue ops; timer_queues_.get_ready_timers(ops); if (!ops.empty()) { lock.unlock(); scheduler_.post_deferred_completions(ops); lock.lock(); } } } void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler) { scheduler->run_thread(); } void winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.insert(&queue); } void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(mutex_); timer_queues_.erase(&queue); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP galera-4-26.4.25/asio/asio/detail/impl/select_reactor.hpp000644 000164 177776 00000005415 15107057155 024321 0ustar00jenkinsnogroup000000 000000 // // detail/impl/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void select_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. 
template void select_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void select_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { asio::detail::mutex::scoped_lock lock(mutex_); if (shutdown_) { scheduler_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); scheduler_.work_started(); if (earliest) interrupter_.interrupt(); } template std::size_t select_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); scheduler_.post_deferred_completions(ops); return n; } template void select_reactor::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source) { asio::detail::mutex::scoped_lock lock(mutex_); op_queue ops; queue.cancel_timer(target, ops); queue.move_timer(target, source); lock.unlock(); scheduler_.post_deferred_completions(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/impl/win_static_mutex.ipp000644 000164 177776 00000006570 15107057155 024715 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_static_mutex.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include #include "asio/detail/throw_error.hpp" #include "asio/detail/win_static_mutex.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { void win_static_mutex::init() { int error = do_init(); asio::error_code ec(error, asio::error::get_system_category()); asio::detail::throw_error(ec, "static_mutex"); } int win_static_mutex::do_init() { using namespace std; // For sprintf. wchar_t mutex_name[128]; #if defined(ASIO_HAS_SECURE_RTL) swprintf_s( #else // defined(ASIO_HAS_SECURE_RTL) _snwprintf( #endif // defined(ASIO_HAS_SECURE_RTL) mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p", static_cast(::GetCurrentProcessId()), this); #if defined(ASIO_WINDOWS_APP) HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0); #else // defined(ASIO_WINDOWS_APP) HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name); #endif // defined(ASIO_WINDOWS_APP) DWORD last_error = ::GetLastError(); if (mutex == 0) return ::GetLastError(); if (last_error == ERROR_ALREADY_EXISTS) { #if defined(ASIO_WINDOWS_APP) ::WaitForSingleObjectEx(mutex, INFINITE, false); #else // defined(ASIO_WINDOWS_APP) ::WaitForSingleObject(mutex, INFINITE); #endif // defined(ASIO_WINDOWS_APP) } if (initialised_) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } #if defined(__MINGW32__) // Not sure if MinGW supports structured exception handling, so for now // we'll just call the Windows API and hope. 
# if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif #else __try { # if defined(UNDER_CE) ::InitializeCriticalSection(&crit_section_); # elif defined(ASIO_WINDOWS_APP) if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # else if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000)) { last_error = ::GetLastError(); ::ReleaseMutex(mutex); ::CloseHandle(mutex); return last_error; } # endif } __except(GetExceptionCode() == STATUS_NO_MEMORY ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { ::ReleaseMutex(mutex); ::CloseHandle(mutex); return ERROR_OUTOFMEMORY; } #endif initialised_ = true; ::ReleaseMutex(mutex); ::CloseHandle(mutex); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP galera-4-26.4.25/asio/asio/detail/impl/reactive_serial_port_service.ipp000644 000164 177776 00000010005 15107057155 027240 0ustar00jenkinsnogroup000000 000000 // // detail/impl/reactive_serial_port_service.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/reactive_serial_port_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { reactive_serial_port_service::reactive_serial_port_service( execution_context& context) : execution_context_service_base(context), descriptor_service_(context) { } void reactive_serial_port_service::shutdown() { descriptor_service_.shutdown(); } asio::error_code reactive_serial_port_service::open( reactive_serial_port_service::implementation_type& impl, const std::string& device, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } descriptor_ops::state_type state = 0; int fd = descriptor_ops::open(device.c_str(), O_RDWR | O_NONBLOCK | O_NOCTTY, ec); if (fd < 0) return ec; int s = descriptor_ops::fcntl(fd, F_GETFL, ec); if (s >= 0) s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec); if (s < 0) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); return ec; } // Set up default serial port options. 
termios ios; errno = 0; s = descriptor_ops::error_wrapper(::tcgetattr(fd, &ios), ec); if (s >= 0) { #if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE) ::cfmakeraw(&ios); #else ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); ios.c_oflag &= ~OPOST; ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); ios.c_cflag &= ~(CSIZE | PARENB); ios.c_cflag |= CS8; #endif ios.c_iflag |= IGNPAR; ios.c_cflag |= CREAD | CLOCAL; errno = 0; s = descriptor_ops::error_wrapper(::tcsetattr(fd, TCSANOW, &ios), ec); } if (s < 0) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); return ec; } // We're done. Take ownership of the serial port descriptor. if (descriptor_service_.assign(impl, fd, ec)) { asio::error_code ignored_ec; descriptor_ops::close(fd, state, ignored_ec); } return ec; } asio::error_code reactive_serial_port_service::do_set_option( reactive_serial_port_service::implementation_type& impl, reactive_serial_port_service::store_function_type store, const void* option, asio::error_code& ec) { termios ios; errno = 0; descriptor_ops::error_wrapper(::tcgetattr( descriptor_service_.native_handle(impl), &ios), ec); if (ec) return ec; if (store(option, ios, ec)) return ec; errno = 0; descriptor_ops::error_wrapper(::tcsetattr( descriptor_service_.native_handle(impl), TCSANOW, &ios), ec); return ec; } asio::error_code reactive_serial_port_service::do_get_option( const reactive_serial_port_service::implementation_type& impl, reactive_serial_port_service::load_function_type load, void* option, asio::error_code& ec) const { termios ios; errno = 0; descriptor_ops::error_wrapper(::tcgetattr( descriptor_service_.native_handle(impl), &ios), ec); if (ec) return ec; return load(option, ios, ec); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // 
ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP galera-4-26.4.25/asio/asio/detail/impl/win_tss_ptr.ipp000644 000164 177776 00000002450 15107057155 023673 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_tss_ptr.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/throw_error.hpp" #include "asio/detail/win_tss_ptr.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD win_tss_ptr_create() { #if defined(UNDER_CE) const DWORD out_of_indexes = 0xFFFFFFFF; #else const DWORD out_of_indexes = TLS_OUT_OF_INDEXES; #endif DWORD tss_key = ::TlsAlloc(); if (tss_key == out_of_indexes) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "tss"); } return tss_key; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP galera-4-26.4.25/asio/asio/detail/impl/strand_executor_service.hpp000644 000164 177776 00000012754 15107057155 026260 0ustar00jenkinsnogroup000000 000000 // // detail/impl/strand_executor_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP #define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/call_stack.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/recycling_allocator.hpp" #include "asio/executor_work_guard.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class strand_executor_service::invoker { public: invoker(const implementation_type& impl, Executor& ex) : impl_(impl), work_(ex) { } invoker(const invoker& other) : impl_(other.impl_), work_(other.work_) { } #if defined(ASIO_HAS_MOVE) invoker(invoker&& other) : impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)), work_(ASIO_MOVE_CAST(executor_work_guard)(other.work_)) { } #endif // defined(ASIO_HAS_MOVE) struct on_invoker_exit { invoker* this_; ~on_invoker_exit() { this_->impl_->mutex_->lock(); this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_); bool more_handlers = this_->impl_->locked_ = !this_->impl_->ready_queue_.empty(); this_->impl_->mutex_->unlock(); if (more_handlers) { Executor ex(this_->work_.get_executor()); recycling_allocator allocator; ex.post(ASIO_MOVE_CAST(invoker)(*this_), allocator); } } }; void operator()() { // Indicate that this strand is executing on the current thread. call_stack::context ctx(impl_.get()); // Ensure the next handler, if any, is scheduled on block exit. on_invoker_exit on_exit = { this }; (void)on_exit; // Run all ready handlers. No lock is required since the ready queue is // accessed only within the strand. 
asio::error_code ec; while (scheduler_operation* o = impl_->ready_queue_.front()) { impl_->ready_queue_.pop(); o->complete(impl_.get(), ec, 0); } } private: implementation_type impl_; executor_work_guard work_; }; template void strand_executor_service::dispatch(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) { typedef typename decay::type function_type; // If we are already in the strand then the function can run immediately. if (call_stack::contains(impl.get())) { // Make a local, non-const copy of the function. function_type tmp(ASIO_MOVE_CAST(Function)(function)); fenced_block b(fenced_block::full); asio_handler_invoke_helpers::invoke(tmp, tmp); return; } // Allocate and construct an operation to wrap the function. typedef executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, "strand_executor", impl.get(), 0, "dispatch")); // Add the function to the strand and schedule the strand if required. bool first = enqueue(impl, p.p); p.v = p.p = 0; if (first) ex.dispatch(invoker(impl, ex), a); } // Request invocation of the given function and return immediately. template void strand_executor_service::post(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. typedef executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, "strand_executor", impl.get(), 0, "post")); // Add the function to the strand and schedule the strand if required. 
bool first = enqueue(impl, p.p); p.v = p.p = 0; if (first) ex.post(invoker(impl, ex), a); } // Request invocation of the given function and return immediately. template void strand_executor_service::defer(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a) { typedef typename decay::type function_type; // Allocate and construct an operation to wrap the function. typedef executor_op op; typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 }; p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a); ASIO_HANDLER_CREATION((impl->service_->context(), *p.p, "strand_executor", impl.get(), 0, "defer")); // Add the function to the strand and schedule the strand if required. bool first = enqueue(impl, p.p); p.v = p.p = 0; if (first) ex.defer(invoker(impl, ex), a); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/impl/win_iocp_io_context.ipp000644 000164 177776 00000036663 15107057155 025377 0ustar00jenkinsnogroup000000 000000 // // detail/impl/win_iocp_io_context.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP #define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/win_iocp_io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_iocp_io_context::thread_function { explicit thread_function(win_iocp_io_context* s) : this_(s) { } void operator()() { asio::error_code ec; this_->run(ec); } win_iocp_io_context* this_; }; struct win_iocp_io_context::work_finished_on_block_exit { ~work_finished_on_block_exit() { io_context_->work_finished(); } win_iocp_io_context* io_context_; }; struct win_iocp_io_context::timer_thread_function { void operator()() { while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0) { if (::WaitForSingleObject(io_context_->waitable_timer_.handle, INFINITE) == WAIT_OBJECT_0) { ::InterlockedExchange(&io_context_->dispatch_required_, 1); ::PostQueuedCompletionStatus(io_context_->iocp_.handle, 0, wake_for_dispatch, 0); } } } win_iocp_io_context* io_context_; }; win_iocp_io_context::win_iocp_io_context( asio::execution_context& ctx, int concurrency_hint, bool own_thread) : execution_context_service_base(ctx), iocp_(), outstanding_work_(0), stopped_(0), stop_event_posted_(0), shutdown_(0), gqcs_timeout_(get_gqcs_timeout()), dispatch_required_(0), concurrency_hint_(concurrency_hint) { ASIO_HANDLER_TRACKING_INIT; iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, static_cast(concurrency_hint >= 0 ? 
concurrency_hint : DWORD(~0))); if (!iocp_.handle) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "iocp"); } if (own_thread) { ::InterlockedIncrement(&outstanding_work_); thread_.reset(new asio::detail::thread(thread_function(this))); } } win_iocp_io_context::~win_iocp_io_context() { if (thread_.get()) { thread_->join(); thread_.reset(); } } void win_iocp_io_context::shutdown() { ::InterlockedExchange(&shutdown_, 1); if (timer_thread_.get()) { LARGE_INTEGER timeout; timeout.QuadPart = 1; ::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE); } if (thread_.get()) { thread_->join(); thread_.reset(); ::InterlockedDecrement(&outstanding_work_); } while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0) { op_queue ops; timer_queues_.get_all_timers(ops); ops.push(completed_ops_); if (!ops.empty()) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } else { DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, gqcs_timeout_); if (overlapped) { ::InterlockedDecrement(&outstanding_work_); static_cast(overlapped)->destroy(); } } } if (timer_thread_.get()) timer_thread_->join(); } asio::error_code win_iocp_io_context::register_handle( HANDLE handle, asio::error_code& ec) { if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0) { DWORD last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); } else { ec = asio::error_code(); } return ec; } size_t win_iocp_io_context::run(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(INFINITE, ec)) 
if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_context::run_one(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(INFINITE, ec); } size_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), ec); } size_t win_iocp_io_context::poll(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); size_t n = 0; while (do_one(0, ec)) if (n != (std::numeric_limits::max)()) ++n; return n; } size_t win_iocp_io_context::poll_one(asio::error_code& ec) { if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0) { stop(); ec = asio::error_code(); return 0; } win_iocp_thread_info this_thread; thread_call_stack::context ctx(this, this_thread); return do_one(0, ec); } void win_iocp_io_context::stop() { if (::InterlockedExchange(&stopped_, 1) == 0) { if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "pqcs"); } } } } void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op) { // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. 
mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_context::post_deferred_completions( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); // Flag the operation as ready. op->ready_ = 1; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); completed_ops_.push(ops); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_context::abandon_operations( op_queue& ops) { while (win_iocp_operation* op = ops.front()) { ops.pop(); ::InterlockedDecrement(&outstanding_work_); op->destroy(); } } void win_iocp_io_context::on_pending(win_iocp_operation* op) { if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } } void win_iocp_io_context::on_completion(win_iocp_operation* op, DWORD last_error, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast( &asio::error::get_system_category()); op->Offset = last_error; op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. 
mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } void win_iocp_io_context::on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred) { // Flag that the operation is ready for invocation. op->ready_ = 1; // Store results in the OVERLAPPED structure. op->Internal = reinterpret_cast(&ec.category()); op->Offset = ec.value(); op->OffsetHigh = bytes_transferred; // Enqueue the operation on the I/O completion port. if (!::PostQueuedCompletionStatus(iocp_.handle, 0, overlapped_contains_result, op)) { // Out of resources. Put on completed queue instead. mutex::scoped_lock lock(dispatch_mutex_); completed_ops_.push(op); ::InterlockedExchange(&dispatch_required_, 1); } } size_t win_iocp_io_context::do_one(DWORD msec, asio::error_code& ec) { for (;;) { // Try to acquire responsibility for dispatching timers and completed ops. if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1) { mutex::scoped_lock lock(dispatch_mutex_); // Dispatch pending timers and operations. op_queue ops; ops.push(completed_ops_); timer_queues_.get_ready_timers(ops); post_deferred_completions(ops); update_timeout(); } // Get the next operation from the queue. DWORD bytes_transferred = 0; dword_ptr_t completion_key = 0; LPOVERLAPPED overlapped = 0; ::SetLastError(0); BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred, &completion_key, &overlapped, msec < gqcs_timeout_ ? msec : gqcs_timeout_); DWORD last_error = ::GetLastError(); if (overlapped) { win_iocp_operation* op = static_cast(overlapped); asio::error_code result_ec(last_error, asio::error::get_system_category()); // We may have been passed the last_error and bytes_transferred in the // OVERLAPPED structure itself. 
if (completion_key == overlapped_contains_result) { result_ec = asio::error_code(static_cast(op->Offset), *reinterpret_cast(op->Internal)); bytes_transferred = op->OffsetHigh; } // Otherwise ensure any result has been saved into the OVERLAPPED // structure. else { op->Internal = reinterpret_cast(&result_ec.category()); op->Offset = result_ec.value(); op->OffsetHigh = bytes_transferred; } // Dispatch the operation only if ready. The operation may not be ready // if the initiating function (e.g. a call to WSARecv) has not yet // returned. This is because the initiating function still wants access // to the operation's OVERLAPPED structure. if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1) { // Ensure the count of outstanding work is decremented on block exit. work_finished_on_block_exit on_exit = { this }; (void)on_exit; op->complete(this, result_ec, bytes_transferred); ec = asio::error_code(); return 1; } } else if (!ok) { if (last_error != WAIT_TIMEOUT) { ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } // If we're waiting indefinitely we need to keep going until we get a // real handler. if (msec == INFINITE) continue; ec = asio::error_code(); return 0; } else if (completion_key == wake_for_dispatch) { // We have been woken up to try to acquire responsibility for dispatching // timers and completed operations. } else { // Indicate that there is no longer an in-flight stop event. ::InterlockedExchange(&stop_event_posted_, 0); // The stopped_ flag is always checked to ensure that any leftover // stop events from a previous run invocation are ignored. if (::InterlockedExchangeAdd(&stopped_, 0) != 0) { // Wake up next thread that is blocked on GetQueuedCompletionStatus. 
if (::InterlockedExchange(&stop_event_posted_, 1) == 0) { if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0)) { last_error = ::GetLastError(); ec = asio::error_code(last_error, asio::error::get_system_category()); return 0; } } ec = asio::error_code(); return 0; } } } } DWORD win_iocp_io_context::get_gqcs_timeout() { OSVERSIONINFOEX osvi; ZeroMemory(&osvi, sizeof(osvi)); osvi.dwOSVersionInfoSize = sizeof(osvi); osvi.dwMajorVersion = 6ul; const uint64_t condition_mask = ::VerSetConditionMask( 0, VER_MAJORVERSION, VER_GREATER_EQUAL); if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask)) return INFINITE; return default_gqcs_timeout; } void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.insert(&queue); if (!waitable_timer_.handle) { waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0); if (waitable_timer_.handle == 0) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "timer"); } LARGE_INTEGER timeout; timeout.QuadPart = -max_timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } if (!timer_thread_.get()) { timer_thread_function thread_function = { this }; timer_thread_.reset(new thread(thread_function, 65536)); } } void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue) { mutex::scoped_lock lock(dispatch_mutex_); timer_queues_.erase(&queue); } void win_iocp_io_context::update_timeout() { if (timer_thread_.get()) { // There's no point updating the waitable timer if the new timeout period // exceeds the maximum timeout. In that case, we might as well wait for the // existing period of the timer to expire. 
long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec); if (timeout_usec < max_timeout_usec) { LARGE_INTEGER timeout; timeout.QuadPart = -timeout_usec; timeout.QuadPart *= 10; ::SetWaitableTimer(waitable_timer_.handle, &timeout, max_timeout_msec, 0, 0, FALSE); } } } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP galera-4-26.4.25/asio/asio/detail/impl/kqueue_reactor.hpp000644 000164 177776 00000004707 15107057155 024344 0ustar00jenkinsnogroup000000 000000 // // detail/impl/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template void kqueue_reactor::add_timer_queue(timer_queue& queue) { do_add_timer_queue(queue); } // Remove a timer queue from the reactor. 
template void kqueue_reactor::remove_timer_queue(timer_queue& queue) { do_remove_timer_queue(queue); } template void kqueue_reactor::schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op) { mutex::scoped_lock lock(mutex_); if (shutdown_) { scheduler_.post_immediate_completion(op, false); return; } bool earliest = queue.enqueue_timer(time, timer, op); scheduler_.work_started(); if (earliest) interrupt(); } template std::size_t kqueue_reactor::cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled) { mutex::scoped_lock lock(mutex_); op_queue ops; std::size_t n = queue.cancel_timer(timer, ops, max_cancelled); lock.unlock(); scheduler_.post_deferred_completions(ops); return n; } template void kqueue_reactor::move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source) { mutex::scoped_lock lock(mutex_); op_queue ops; queue.cancel_timer(target, ops); queue.move_timer(target, source); lock.unlock(); scheduler_.post_deferred_completions(ops); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/socket_holder.hpp000644 000164 177776 00000003773 15107057155 023214 0ustar00jenkinsnogroup000000 000000 // // detail/socket_holder.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_HOLDER_HPP #define ASIO_DETAIL_SOCKET_HOLDER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Implement the resource acquisition is initialisation idiom for sockets. class socket_holder : private noncopyable { public: // Construct as an uninitialised socket. socket_holder() : socket_(invalid_socket) { } // Construct to take ownership of the specified socket. explicit socket_holder(socket_type s) : socket_(s) { } // Destructor. ~socket_holder() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); } } // Get the underlying socket. socket_type get() const { return socket_; } // Reset to an uninitialised socket. void reset() { if (socket_ != invalid_socket) { asio::error_code ec; socket_ops::state_type state = 0; socket_ops::close(socket_, state, true, ec); socket_ = invalid_socket; } } // Reset to take ownership of the specified socket. void reset(socket_type s) { reset(); socket_ = s; } // Release ownership of the socket. socket_type release() { socket_type tmp = socket_; socket_ = invalid_socket; return tmp; } private: // The underlying socket. socket_type socket_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_HOLDER_HPP galera-4-26.4.25/asio/asio/detail/signal_set_service.hpp000644 000164 177776 00000014701 15107057155 024230 0ustar00jenkinsnogroup000000 000000 // // detail/signal_set_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #define ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/signal_handler.hpp" #include "asio/detail/signal_op.hpp" #include "asio/detail/socket_types.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) # include "asio/detail/reactor.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(NSIG) && (NSIG > 0) enum { max_signal_number = NSIG }; #else enum { max_signal_number = 128 }; #endif extern ASIO_DECL struct signal_state* get_signal_state(); extern "C" ASIO_DECL void asio_signal_handler(int signal_number); class signal_set_service : public execution_context_service_base { public: // Type used for tracking an individual signal registration. class registration { public: // Default constructor. registration() : signal_number_(0), queue_(0), undelivered_(0), next_in_table_(0), prev_in_table_(0), next_in_set_(0) { } private: // Only this service will have access to the internal values. friend class signal_set_service; // The signal number that is registered. int signal_number_; // The waiting signal handlers. op_queue* queue_; // The number of undelivered signals. 
std::size_t undelivered_; // Pointers to adjacent registrations in the registrations_ table. registration* next_in_table_; registration* prev_in_table_; // Link to next registration in the signal set. registration* next_in_set_; }; // The implementation type of the signal_set. class implementation_type { public: // Default constructor. implementation_type() : signals_(0) { } private: // Only this service will have access to the internal values. friend class signal_set_service; // The pending signal handlers. op_queue queue_; // Linked list of registered signals. registration* signals_; }; // Constructor. ASIO_DECL signal_set_service(execution_context& context); // Destructor. ASIO_DECL ~signal_set_service(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Perform fork-related housekeeping. ASIO_DECL void notify_fork( asio::execution_context::fork_event fork_ev); // Construct a new signal_set implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a signal_set implementation. ASIO_DECL void destroy(implementation_type& impl); // Add a signal to a signal_set. ASIO_DECL asio::error_code add(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove a signal to a signal_set. ASIO_DECL asio::error_code remove(implementation_type& impl, int signal_number, asio::error_code& ec); // Remove all signals from a signal_set. ASIO_DECL asio::error_code clear(implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the signal set. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Start an asynchronous operation to wait for a signal to be delivered. template void async_wait(implementation_type& impl, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef signal_handler op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "signal_set", &impl, 0, "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } // Deliver notification that a particular signal occurred. ASIO_DECL static void deliver_signal(int signal_number); private: // Helper function to add a service to the global signal state. ASIO_DECL static void add_service(signal_set_service* service); // Helper function to remove a service from the global signal state. ASIO_DECL static void remove_service(signal_set_service* service); // Helper function to create the pipe descriptors. ASIO_DECL static void open_descriptors(); // Helper function to close the pipe descriptors. ASIO_DECL static void close_descriptors(); // Helper function to start a wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, signal_op* op); // The scheduler used for dispatching handlers. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) // The type used for registering for pipe reactor notifications. class pipe_read_op; // The reactor used for waiting for pipe readiness. reactor& reactor_; // The per-descriptor reactor data used for the pipe. reactor::per_descriptor_data reactor_data_; #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) // A mapping from signal number to the registered signal sets. registration* registrations_[max_signal_number]; // Pointers to adjacent services in linked list. 
signal_set_service* next_; signal_set_service* prev_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/signal_set_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SIGNAL_SET_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/push_options.hpp000644 000164 177776 00000010541 15107057155 023110 0ustar00jenkinsnogroup000000 000000 // // detail/push_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if !defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility push (default) # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if !defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # if !defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility push (default) # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (push, 8) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if !defined(ASIO_DISABLE_OBJC_WORKAROUND) # if 
!defined(Protocol) && !defined(id) # define Protocol cpp_Protocol # define id cpp_id # define ASIO_OBJC_WORKAROUND # endif # endif # endif # endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if !defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility push (default) # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if (__GNUC__ >= 7) # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wimplicit-fallthrough" # endif // (__GNUC__ >= 7) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option push -a8 -b -Ve- -Vx- -w-inl -vi- # pragma nopushoptwarn # pragma nopackwarning # if !defined(__MT__) # error Multithreaded RTL must be selected. # endif // !defined(__MT__) #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (disable:4103) # pragma warning (push) # pragma warning (disable:4127) # pragma warning (disable:4180) # pragma warning (disable:4244) # pragma warning (disable:4355) # pragma warning (disable:4510) # pragma warning (disable:4512) # pragma warning (disable:4610) # pragma warning (disable:4675) # if (_MSC_VER < 1600) // Visual Studio 2008 generates spurious warnings about unused parameters. # pragma warning (disable:4100) # endif // (_MSC_VER < 1600) # if defined(_M_IX86) && defined(_Wp64) // The /Wp64 option is broken. If you want to check 64 bit portability, use a // 64 bit compiler! 
# pragma warning (disable:4311) # pragma warning (disable:4312) # endif // defined(_M_IX86) && defined(_Wp64) # pragma pack (push, 8) // Note that if the /Og optimisation flag is enabled with MSVC6, the compiler // has a tendency to incorrectly optimise away some calls to member template // functions, even though those functions contain code that should not be // optimised away! Therefore we will always disable this optimisation option // for the MSVC6 compiler. # if (_MSC_VER < 1300) # pragma optimize ("g", off) # endif # if !defined(_MT) # error Multithreaded RTL must be selected. # endif // !defined(_MT) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if !defined(ASIO_DISABLE_CLR_WORKAROUND) # if !defined(generic) # define generic cpp_generic # define ASIO_CLR_WORKAROUND # endif # endif # endif #endif galera-4-26.4.25/asio/asio/detail/deadline_timer_service.hpp000644 000164 177776 00000020351 15107057155 025043 0ustar00jenkinsnogroup000000 000000 // // detail/deadline_timer_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #define ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/timer_queue_ptime.hpp" #include "asio/detail/timer_scheduler.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/detail/wait_op.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include # include #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class deadline_timer_service : public execution_context_service_base > { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // The implementation type of the timer. This type is dependent on the // underlying implementation of the timer service. struct implementation_type : private asio::detail::noncopyable { time_type expiry; bool might_have_pending_waits; typename timer_queue::per_timer_data timer_data; }; // Constructor. deadline_timer_service(execution_context& context) : execution_context_service_base< deadline_timer_service >(context), scheduler_(asio::use_service(context)) { scheduler_.init_task(); scheduler_.add_timer_queue(timer_queue_); } // Destructor. ~deadline_timer_service() { scheduler_.remove_timer_queue(timer_queue_); } // Destroy all user-defined handler objects owned by the service. void shutdown() { } // Construct a new timer implementation. 
void construct(implementation_type& impl) { impl.expiry = time_type(); impl.might_have_pending_waits = false; } // Destroy a timer implementation. void destroy(implementation_type& impl) { asio::error_code ec; cancel(impl, ec); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { scheduler_.move_timer(timer_queue_, impl.timer_data, other_impl.timer_data); impl.expiry = other_impl.expiry; other_impl.expiry = time_type(); impl.might_have_pending_waits = other_impl.might_have_pending_waits; other_impl.might_have_pending_waits = false; } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, deadline_timer_service& other_service, implementation_type& other_impl) { if (this != &other_service) if (impl.might_have_pending_waits) scheduler_.cancel_timer(timer_queue_, impl.timer_data); other_service.scheduler_.move_timer(other_service.timer_queue_, impl.timer_data, other_impl.timer_data); impl.expiry = other_impl.expiry; other_impl.expiry = time_type(); impl.might_have_pending_waits = other_impl.might_have_pending_waits; other_impl.might_have_pending_waits = false; } // Cancel any asynchronous wait operations associated with the timer. std::size_t cancel(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION((scheduler_.context(), "deadline_timer", &impl, 0, "cancel")); std::size_t count = scheduler_.cancel_timer(timer_queue_, impl.timer_data); impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Cancels one asynchronous wait operation associated with the timer. 
std::size_t cancel_one(implementation_type& impl, asio::error_code& ec) { if (!impl.might_have_pending_waits) { ec = asio::error_code(); return 0; } ASIO_HANDLER_OPERATION((scheduler_.context(), "deadline_timer", &impl, 0, "cancel_one")); std::size_t count = scheduler_.cancel_timer( timer_queue_, impl.timer_data, 1); if (count == 0) impl.might_have_pending_waits = false; ec = asio::error_code(); return count; } // Get the expiry time for the timer as an absolute time. time_type expiry(const implementation_type& impl) const { return impl.expiry; } // Get the expiry time for the timer as an absolute time. time_type expires_at(const implementation_type& impl) const { return impl.expiry; } // Get the expiry time for the timer relative to now. duration_type expires_from_now(const implementation_type& impl) const { return Time_Traits::subtract(this->expiry(impl), Time_Traits::now()); } // Set the expiry time for the timer as an absolute time. std::size_t expires_at(implementation_type& impl, const time_type& expiry_time, asio::error_code& ec) { std::size_t count = cancel(impl, ec); impl.expiry = expiry_time; ec = asio::error_code(); return count; } // Set the expiry time for the timer relative to now. std::size_t expires_after(implementation_type& impl, const duration_type& expiry_time, asio::error_code& ec) { return expires_at(impl, Time_Traits::add(Time_Traits::now(), expiry_time), ec); } // Set the expiry time for the timer relative to now. std::size_t expires_from_now(implementation_type& impl, const duration_type& expiry_time, asio::error_code& ec) { return expires_at(impl, Time_Traits::add(Time_Traits::now(), expiry_time), ec); } // Perform a blocking wait on the timer. 
void wait(implementation_type& impl, asio::error_code& ec) { time_type now = Time_Traits::now(); ec = asio::error_code(); while (Time_Traits::less_than(now, impl.expiry) && !ec) { this->do_wait(Time_Traits::to_posix_duration( Time_Traits::subtract(impl.expiry, now)), ec); now = Time_Traits::now(); } } // Start an asynchronous wait on the timer. template void async_wait(implementation_type& impl, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); impl.might_have_pending_waits = true; ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "deadline_timer", &impl, 0, "async_wait")); scheduler_.schedule_timer(timer_queue_, impl.expiry, impl.timer_data, p.p); p.v = p.p = 0; } private: // Helper function to wait given a duration type. The duration type should // either be of type boost::posix_time::time_duration, or implement the // required subset of its interface. template void do_wait(const Duration& timeout, asio::error_code& ec) { #if defined(ASIO_WINDOWS_RUNTIME) std::this_thread::sleep_for( std::chrono::seconds(timeout.total_seconds()) + std::chrono::microseconds(timeout.total_microseconds())); ec = asio::error_code(); #else // defined(ASIO_WINDOWS_RUNTIME) ::timeval tv; tv.tv_sec = timeout.total_seconds(); tv.tv_usec = timeout.total_microseconds() % 1000000; socket_ops::select(0, 0, 0, 0, &tv, ec); #endif // defined(ASIO_WINDOWS_RUNTIME) } // The queue of timers. timer_queue timer_queue_; // The object that schedules and executes timers. Usually a reactor. 
timer_scheduler& scheduler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/conditionally_enabled_mutex.hpp000644 000164 177776 00000005713 15107057155 026127 0ustar00jenkinsnogroup000000 000000 // // detail/conditionally_enabled_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP #define ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Mutex adapter used to conditionally enable or disable locking. class conditionally_enabled_mutex : private noncopyable { public: // Helper class to lock and unlock a mutex automatically. class scoped_lock : private noncopyable { public: // Tag type used to distinguish constructors. enum adopt_lock_t { adopt_lock }; // Constructor adopts a lock that is already held. scoped_lock(conditionally_enabled_mutex& m, adopt_lock_t) : mutex_(m), locked_(m.enabled_) { } // Constructor acquires the lock. explicit scoped_lock(conditionally_enabled_mutex& m) : mutex_(m) { if (m.enabled_) { mutex_.mutex_.lock(); locked_ = true; } else locked_ = false; } // Destructor releases the lock. ~scoped_lock() { if (locked_) mutex_.mutex_.unlock(); } // Explicitly acquire the lock. void lock() { if (mutex_.enabled_ && !locked_) { mutex_.mutex_.lock(); locked_ = true; } } // Explicitly release the lock. 
void unlock() { if (locked_) { mutex_.unlock(); locked_ = false; } } // Test whether the lock is held. bool locked() const { return locked_; } // Get the underlying mutex. asio::detail::mutex& mutex() { return mutex_.mutex_; } private: friend class conditionally_enabled_event; conditionally_enabled_mutex& mutex_; bool locked_; }; // Constructor. explicit conditionally_enabled_mutex(bool enabled) : enabled_(enabled) { } // Destructor. ~conditionally_enabled_mutex() { } // Determine whether locking is enabled. bool enabled() const { return enabled_; } // Lock the mutex. void lock() { if (enabled_) mutex_.lock(); } // Unlock the mutex. void unlock() { if (enabled_) mutex_.unlock(); } private: friend class scoped_lock; friend class conditionally_enabled_event; asio::detail::mutex mutex_; const bool enabled_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/reactor_fwd.hpp000644 000164 177776 00000002040 15107057155 022650 0ustar00jenkinsnogroup000000 000000 // // detail/reactor_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_FWD_HPP #define ASIO_DETAIL_REACTOR_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME) typedef class null_reactor reactor; #elif defined(ASIO_HAS_IOCP) typedef class select_reactor reactor; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor reactor; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor reactor; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor reactor; #else typedef class select_reactor reactor; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_REACTOR_FWD_HPP galera-4-26.4.25/asio/asio/detail/recycling_allocator.hpp000644 000164 177776 00000004506 15107057155 024401 0ustar00jenkinsnogroup000000 000000 // // detail/recycling_allocator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP #define ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/thread_context.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class recycling_allocator { public: typedef T value_type; template struct rebind { typedef recycling_allocator other; }; recycling_allocator() { } template recycling_allocator(const recycling_allocator&) { } T* allocate(std::size_t n) { typedef thread_context::thread_call_stack call_stack; void* p = thread_info_base::allocate(Purpose(), call_stack::top(), sizeof(T) * n); return static_cast(p); } void deallocate(T* p, std::size_t n) { typedef thread_context::thread_call_stack call_stack; thread_info_base::deallocate(Purpose(), call_stack::top(), p, sizeof(T) * n); } }; template class recycling_allocator { public: typedef void value_type; template struct rebind { typedef recycling_allocator other; }; recycling_allocator() { } template recycling_allocator(const recycling_allocator&) { } }; template struct get_recycling_allocator { typedef Allocator type; static type get(const Allocator& a) { return a; } }; template struct get_recycling_allocator, Purpose> { typedef recycling_allocator type; static type get(const std::allocator&) { return type(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RECYCLING_ALLOCATOR_HPP galera-4-26.4.25/asio/asio/detail/null_global.hpp000644 000164 177776 00000002304 15107057155 022646 0ustar00jenkinsnogroup000000 000000 // // detail/null_global.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_GLOBAL_HPP #define ASIO_DETAIL_NULL_GLOBAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct null_global_impl { null_global_impl() : ptr_(0) { } // Destructor automatically cleans up the global. ~null_global_impl() { delete ptr_; } static null_global_impl instance_; T* ptr_; }; template null_global_impl null_global_impl::instance_; template T& null_global() { if (null_global_impl::instance_.ptr_ == 0) null_global_impl::instance_.ptr_ = new T; return *null_global_impl::instance_.ptr_; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NULL_GLOBAL_HPP galera-4-26.4.25/asio/asio/detail/null_tss_ptr.hpp000644 000164 177776 00000002233 15107057155 023105 0ustar00jenkinsnogroup000000 000000 // // detail/null_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_TSS_PTR_HPP #define ASIO_DETAIL_NULL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_tss_ptr : private noncopyable { public: // Constructor. null_tss_ptr() : value_(0) { } // Destructor. ~null_tss_ptr() { } // Get the value. 
operator T*() const { return value_; } // Set the value. void operator=(T* value) { value_ = value; } private: T* value_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_TSS_PTR_HPP galera-4-26.4.25/asio/asio/detail/eventfd_select_interrupter.hpp000644 000164 177776 00000004507 15107057155 026020 0ustar00jenkinsnogroup000000 000000 // // detail/eventfd_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class eventfd_select_interrupter { public: // Constructor. ASIO_DECL eventfd_select_interrupter(); // Destructor. ASIO_DECL ~eventfd_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. 
This file // descriptor is passed to select such that when it is time to stop, a single // 64bit value will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // 64bit non-zero value may be written to this to wake up the select which is // waiting for the other end to become readable. This descriptor will only // differ from the read descriptor when a pipe is used. int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/eventfd_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EVENTFD) #endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP galera-4-26.4.25/asio/asio/detail/io_object_impl.hpp000644 000164 177776 00000012666 15107057155 023346 0ustar00jenkinsnogroup000000 000000 // // io_object_impl.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IO_OBJECT_IMPL_HPP #define ASIO_DETAIL_IO_OBJECT_IMPL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include "asio/detail/config.hpp" #include "asio/detail/io_object_executor.hpp" #include "asio/detail/type_traits.hpp" #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { class executor; namespace detail { inline bool is_native_io_executor(const io_context::executor_type&) { return true; } template inline bool is_native_io_executor(const Executor&, typename enable_if::value>::type* = 0) { return false; } template inline bool is_native_io_executor(const Executor& ex, typename enable_if::value>::type* = 0) { #if !defined (ASIO_NO_TYPEID) return ex.target_type() == typeid(io_context::executor_type); #else // !defined (ASIO_NO_TYPEID) return false; #endif // !defined (ASIO_NO_TYPEID) } template class io_object_impl { public: // The type of the service that will be used to provide I/O operations. typedef IoObjectService service_type; // The underlying implementation type of I/O object. typedef typename service_type::implementation_type implementation_type; // The type of the executor associated with the object. typedef Executor executor_type; // The type of executor to be used when implementing asynchronous operations. typedef io_object_executor implementation_executor_type; // Construct an I/O object using an executor. explicit io_object_impl(const executor_type& ex) : service_(&asio::use_service(ex.context())), implementation_executor_(ex, (is_native_io_executor)(ex)) { service_->construct(implementation_); } // Construct an I/O object using an execution context. 
template explicit io_object_impl(ExecutionContext& context, typename enable_if::value>::type* = 0) : service_(&asio::use_service(context)), implementation_executor_(context.get_executor(), is_same::value) { service_->construct(implementation_); } #if defined(ASIO_HAS_MOVE) // Move-construct an I/O object. io_object_impl(io_object_impl&& other) : service_(&other.get_service()), implementation_executor_(other.get_implementation_executor()) { service_->move_construct(implementation_, other.implementation_); } // Perform a converting move-construction of an I/O object. template io_object_impl(io_object_impl&& other) : service_(&asio::use_service( other.get_implementation_executor().context())), implementation_executor_(other.get_implementation_executor()) { service_->converting_move_construct(implementation_, other.get_service(), other.get_implementation()); } #endif // defined(ASIO_HAS_MOVE) // Destructor. ~io_object_impl() { service_->destroy(implementation_); } #if defined(ASIO_HAS_MOVE) // Move-assign an I/O object. io_object_impl& operator=(io_object_impl&& other) { if (this != &other) { service_->move_assign(implementation_, *other.service_, other.implementation_); implementation_executor_.~implementation_executor_type(); new (&implementation_executor_) implementation_executor_type( std::move(other.implementation_executor_)); service_ = other.service_; } return *this; } #endif // defined(ASIO_HAS_MOVE) // Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return implementation_executor_.inner_executor(); } // Get the executor to be used when implementing asynchronous operations. const implementation_executor_type& get_implementation_executor() ASIO_NOEXCEPT { return implementation_executor_; } // Get the service associated with the I/O object. service_type& get_service() { return *service_; } // Get the service associated with the I/O object. 
const service_type& get_service() const { return *service_; } // Get the underlying implementation of the I/O object. implementation_type& get_implementation() { return implementation_; } // Get the underlying implementation of the I/O object. const implementation_type& get_implementation() const { return implementation_; } private: // Disallow copying and copy assignment. io_object_impl(const io_object_impl&); io_object_impl& operator=(const io_object_impl&); // The service associated with the I/O object. service_type* service_; // The underlying implementation of the I/O object. implementation_type implementation_; // The associated executor. implementation_executor_type implementation_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IO_OBJECT_IMPL_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_connect_op.hpp000644 000164 177776 00000007316 15107057155 025752 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_connect_op_base : public reactor_op { public: win_iocp_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&win_iocp_socket_connect_op_base::do_perform, complete_func), socket_(socket), connect_ex_(false) { } static status do_perform(reactor_op* base) { win_iocp_socket_connect_op_base* o( static_cast(base)); return socket_ops::non_blocking_connect( o->socket_, o->ec_) ? done : not_done; } socket_type socket_; bool connect_ex_; }; template class win_iocp_socket_connect_op : public win_iocp_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_connect_op); win_iocp_socket_connect_op(socket_type socket, Handler& handler, const IoExecutor& io_ex) : win_iocp_socket_connect_op_base(socket, &win_iocp_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. 
win_iocp_socket_connect_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); if (owner) { if (o->connect_ex_) socket_ops::complete_iocp_connect(o->socket_, ec); else ec = o->ec_; } ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_CONNECT_OP_HPP galera-4-26.4.25/asio/asio/detail/winrt_timer_scheduler.hpp000644 000164 177776 00000010701 15107057155 024755 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/event.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_timer_scheduler : public execution_context_service_base { public: // Constructor. ASIO_DECL winrt_timer_scheduler(execution_context& context); // Destructor. ASIO_DECL ~winrt_timer_scheduler(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Recreate internal descriptors following a fork. ASIO_DECL void notify_fork(execution_context::fork_event fork_ev); // Initialise the task. No effect as this class uses its own thread. ASIO_DECL void init_task(); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. 
template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& to, typename timer_queue::per_timer_data& from); private: // Run the select loop in the thread. ASIO_DECL void run_thread(); // Entry point for the select loop thread. ASIO_DECL static void call_run_thread(winrt_timer_scheduler* reactor); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // The scheduler implementation used to post completions. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; // Mutex used to protect internal variables. asio::detail::mutex mutex_; // Event used to wake up background thread. asio::detail::event event_; // The timer queues. timer_queue_set timer_queues_; // The background thread that is waiting for timers to expire. asio::detail::thread* thread_; // Does the background thread need to stop. bool stop_thread_; // Whether the service has been shut down. 
bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/winrt_timer_scheduler.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_timer_scheduler.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_TIMER_SCHEDULER_HPP galera-4-26.4.25/asio/asio/detail/thread.hpp000644 000164 177776 00000002733 15107057155 021631 0ustar00jenkinsnogroup000000 000000 // // detail/thread.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_HPP #define ASIO_DETAIL_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_thread.hpp" #elif defined(ASIO_WINDOWS) # if defined(UNDER_CE) # include "asio/detail/wince_thread.hpp" # elif defined(ASIO_WINDOWS_APP) # include "asio/detail/winapp_thread.hpp" # else # include "asio/detail/win_thread.hpp" # endif #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_thread.hpp" #elif defined(ASIO_HAS_STD_THREAD) # include "asio/detail/std_thread.hpp" #else # error Only Windows, POSIX and std::thread are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_thread thread; #elif defined(ASIO_WINDOWS) # if defined(UNDER_CE) typedef wince_thread thread; # elif defined(ASIO_WINDOWS_APP) typedef winapp_thread thread; # else typedef win_thread thread; # endif #elif defined(ASIO_HAS_PTHREADS) typedef posix_thread thread; #elif defined(ASIO_HAS_STD_THREAD) typedef std_thread thread; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THREAD_HPP galera-4-26.4.25/asio/asio/detail/winrt_async_manager.hpp000644 000164 177776 00000020741 15107057155 024413 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_async_manager.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #define ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/atomic_count.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_async_manager : public execution_context_service_base { public: // Constructor. winrt_async_manager(execution_context& context) : execution_context_service_base(context), scheduler_(use_service(context)), outstanding_ops_(1) { } // Destructor. ~winrt_async_manager() { } // Destroy all user-defined handler objects owned by the service. 
void shutdown() { if (--outstanding_ops_ > 0) { // Block until last operation is complete. std::future f = promise_.get_future(); f.wait(); } } void sync(Windows::Foundation::IAsyncAction^ action, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); action->Completed = ref new AsyncActionCompletedHandler( [promise](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( action->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); } template TResult sync(Windows::Foundation::IAsyncOperation^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationCompletedHandler( [promise](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } template TResult sync( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, asio::error_code& ec) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto promise = std::make_shared>(); auto future = promise->get_future(); operation->Completed = ref new AsyncOperationWithProgressCompletedHandler( [promise](IAsyncOperationWithProgress^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: 
promise->set_value(asio::error::operation_aborted); break; case AsyncStatus::Started: break; case AsyncStatus::Error: case AsyncStatus::Completed: default: asio::error_code ec( operation->ErrorCode.Value, asio::system_category()); promise->set_value(ec); break; } }); ec = future.get(); return operation->GetResults(); } void async(Windows::Foundation::IAsyncAction^ action, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncActionCompletedHandler( [this, handler](IAsyncAction^ action, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: case AsyncStatus::Error: default: handler->ec_ = asio::error_code( action->ErrorCode.Value, asio::system_category()); break; } scheduler_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); scheduler_.work_started(); ++outstanding_ops_; action->Completed = on_completed; } template void async(Windows::Foundation::IAsyncOperation^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationCompletedHandler( [this, handler](IAsyncOperation^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. 
case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } scheduler_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); scheduler_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } template void async( Windows::Foundation::IAsyncOperationWithProgress< TResult, TProgress>^ operation, winrt_async_op* handler) { using namespace Windows::Foundation; using Windows::Foundation::AsyncStatus; auto on_completed = ref new AsyncOperationWithProgressCompletedHandler( [this, handler](IAsyncOperationWithProgress< TResult, TProgress>^ operation, AsyncStatus status) { switch (status) { case AsyncStatus::Canceled: handler->ec_ = asio::error::operation_aborted; break; case AsyncStatus::Started: return; case AsyncStatus::Completed: handler->result_ = operation->GetResults(); // Fall through. case AsyncStatus::Error: default: handler->ec_ = asio::error_code( operation->ErrorCode.Value, asio::system_category()); break; } scheduler_.post_deferred_completion(handler); if (--outstanding_ops_ == 0) promise_.set_value(); }); scheduler_.work_started(); ++outstanding_ops_; operation->Completed = on_completed; } private: // The scheduler implementation used to post completed handlers. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; // Count of outstanding operations. atomic_count outstanding_ops_; // Used to keep wait for outstanding operations to complete. 
std::promise promise_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_ASYNC_MANAGER_HPP galera-4-26.4.25/asio/asio/detail/winrt_ssocket_service.hpp000644 000164 177776 00000015655 15107057155 025007 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_ssocket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/winrt_socket_connect_op.hpp" #include "asio/detail/winrt_ssocket_service_base.hpp" #include "asio/detail/winrt_utils.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_ssocket_service : public execution_context_service_base >, public winrt_ssocket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. struct implementation_type : base_implementation_type { // Default constructor. implementation_type() : base_implementation_type(), protocol_(endpoint_type().protocol()) { } // The protocol associated with the socket. protocol_type protocol_; }; // Constructor. 
winrt_ssocket_service(execution_context& context) : execution_context_service_base >(context), winrt_ssocket_service_base(context) { } // Destroy all user-defined handler objects owned by the service. void shutdown() { this->base_shutdown(); } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-assign from another socket implementation. void move_assign(implementation_type& impl, winrt_ssocket_service& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, winrt_ssocket_service&, typename winrt_ssocket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } try { impl.socket_ = ref new Windows::Networking::Sockets::StreamSocket; impl.protocol_ = protocol; ec = asio::error_code(); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); } return ec; } // Assign a native socket to a socket implementation. 
asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (is_open(impl)) { ec = asio::error::already_open; return ec; } impl.socket_ = native_socket; impl.protocol_ = protocol; ec = asio::error_code(); return ec; } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, true, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; endpoint.resize(do_get_endpoint(impl, false, endpoint.data(), endpoint.size(), ec)); return endpoint; } // Disable sends or receives on the socket. asio::error_code shutdown(implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { return do_set_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); } // Get a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); do_get_option(impl, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Connect the socket to the specified endpoint. 
asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { return do_connect(impl, peer_endpoint.data(), ec); } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "socket", &impl, 0, "async_connect")); start_connect_op(impl, peer_endpoint.data(), p.p, is_continuation); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/scheduler.hpp000644 000164 177776 00000015254 15107057155 022342 0ustar00jenkinsnogroup000000 000000 // // detail/scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCHEDULER_HPP #define ASIO_DETAIL_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/execution_context.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/conditionally_enabled_event.hpp" #include "asio/detail/conditionally_enabled_mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_fwd.hpp" #include "asio/detail/scheduler_operation.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/thread_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct scheduler_thread_info; class scheduler : public execution_context_service_base, public thread_context { public: typedef scheduler_operation operation; // Constructor. Specifies the number of concurrent threads that are likely to // run the scheduler. If set to 1 certain optimisation are performed. ASIO_DECL scheduler(asio::execution_context& ctx, int concurrency_hint = 0, bool own_thread = true); // Destructor. ASIO_DECL ~scheduler(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Initialise the task, if required. ASIO_DECL void init_task(); // Run the event loop until interrupted or no more work. ASIO_DECL std::size_t run(asio::error_code& ec); // Run until interrupted or one operation is performed. ASIO_DECL std::size_t run_one(asio::error_code& ec); // Run until timeout, interrupted, or one operation is performed. ASIO_DECL std::size_t wait_one( long usec, asio::error_code& ec); // Poll for operations without blocking. ASIO_DECL std::size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL std::size_t poll_one(asio::error_code& ec); // Interrupt the event processing loop. 
ASIO_DECL void stop(); // Determine whether the scheduler is stopped. ASIO_DECL bool stopped() const; // Restart in preparation for a subsequent run invocation. ASIO_DECL void restart(); // Notify that some work has started. void work_started() { ++outstanding_work_; } // Used to compensate for a forthcoming work_finished call. Must be called // from within a scheduler-owned thread. ASIO_DECL void compensating_work_started(); // Notify that some work has finished. void work_finished() { if (--outstanding_work_ == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. ASIO_DECL void post_immediate_completion( operation* op, bool is_continuation); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operation. ASIO_DECL void post_deferred_completion(operation* op); // Request invocation of the given operations and return immediately. Assumes // that work_started() was previously called for each operation. ASIO_DECL void post_deferred_completions(op_queue& ops); // Enqueue the given operation following a failed attempt to dispatch the // operation for immediate invocation. ASIO_DECL void do_dispatch(operation* op); // Process unfinished operations as part of a shutdownoperation. Assumes that // work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); // Get the concurrency hint that was used to initialise the scheduler. int concurrency_hint() const { return concurrency_hint_; } private: // The mutex type used by this scheduler. typedef conditionally_enabled_mutex mutex; // The event type used by this scheduler. typedef conditionally_enabled_event event; // Structure containing thread-specific data. 
typedef scheduler_thread_info thread_info; // Run at most one operation. May block. ASIO_DECL std::size_t do_run_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Run at most one operation with a timeout. May block. ASIO_DECL std::size_t do_wait_one(mutex::scoped_lock& lock, thread_info& this_thread, long usec, const asio::error_code& ec); // Poll for at most one operation. ASIO_DECL std::size_t do_poll_one(mutex::scoped_lock& lock, thread_info& this_thread, const asio::error_code& ec); // Stop the task and all idle threads. ASIO_DECL void stop_all_threads(mutex::scoped_lock& lock); // Wake a single idle thread, or the task, and always unlock the mutex. ASIO_DECL void wake_one_thread_and_unlock( mutex::scoped_lock& lock); // Helper class to run the scheduler in its own thread. class thread_function; friend class thread_function; // Helper class to perform task-related operations on block exit. struct task_cleanup; friend struct task_cleanup; // Helper class to call work-related operations on block exit. struct work_cleanup; friend struct work_cleanup; // Whether to optimise for single-threaded use cases. const bool one_thread_; // Mutex to protect access to internal data. mutable mutex mutex_; // Event to wake up blocked threads. event wakeup_event_; // The task to be run by this service. reactor* task_; // Operation object to represent the position of the task in the queue. struct task_operation : operation { task_operation() : operation(0) {} } task_operation_; // Whether the task has been interrupted. bool task_interrupted_; // The count of unfinished work. atomic_count outstanding_work_; // The queue of handlers that are ready to be delivered. op_queue op_queue_; // Flag to indicate that the dispatcher has been stopped. bool stopped_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; // The concurrency hint used to initialise the scheduler. 
const int concurrency_hint_; // The thread that is running the scheduler. asio::detail::thread* thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/scheduler.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SCHEDULER_HPP galera-4-26.4.25/asio/asio/detail/std_global.hpp000644 000164 177776 00000002711 15107057155 022470 0ustar00jenkinsnogroup000000 000000 // // detail/std_global.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_GLOBAL_HPP #define ASIO_DETAIL_STD_GLOBAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_CALL_ONCE) #include #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct std_global_impl { // Helper function to perform initialisation. static void do_init() { instance_.ptr_ = new T; } // Destructor automatically cleans up the global. 
~std_global_impl() { delete ptr_; } static std::once_flag init_once_; static std_global_impl instance_; T* ptr_; }; template std::once_flag std_global_impl::init_once_; template std_global_impl std_global_impl::instance_; template T& std_global() { std::call_once(std_global_impl::init_once_, &std_global_impl::do_init); return *std_global_impl::instance_.ptr_; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_CALL_ONCE) #endif // ASIO_DETAIL_STD_GLOBAL_HPP galera-4-26.4.25/asio/asio/detail/wait_op.hpp000644 000164 177776 00000001643 15107057155 022023 0ustar00jenkinsnogroup000000 000000 // // detail/wait_op.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WAIT_OP_HPP #define ASIO_DETAIL_WAIT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: wait_op(func_type func) : operation(func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WAIT_OP_HPP galera-4-26.4.25/asio/asio/detail/dependent_type.hpp000644 000164 177776 00000001460 15107057155 023365 0ustar00jenkinsnogroup000000 000000 // // detail/dependent_type.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEPENDENT_TYPE_HPP #define ASIO_DETAIL_DEPENDENT_TYPE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct dependent_type { typedef T type; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_DEPENDENT_TYPE_HPP galera-4-26.4.25/asio/asio/detail/gcc_hppa_fenced_block.hpp000644 000164 177776 00000002707 15107057155 024605 0ustar00jenkinsnogroup000000 000000 // // detail/gcc_hppa_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_hppa_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_hppa_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_hppa_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_hppa_fenced_block() { barrier(); } private: static void barrier() { // This is just a placeholder and almost certainly not sufficient. 
__asm__ __volatile__ ("" : : : "memory"); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) #endif // ASIO_DETAIL_GCC_HPPA_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/null_signal_blocker.hpp000644 000164 177776 00000002746 15107057155 024376 0ustar00jenkinsnogroup000000 000000 // // detail/null_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. null_signal_blocker() { } // Destructor restores the previous signal mask. ~null_signal_blocker() { } // Block all signals for the calling thread. void block() { } // Restore the previous signal mask. 
void unblock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) // || defined(ASIO_WINDOWS) // || defined(ASIO_WINDOWS_RUNTIME) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // ASIO_DETAIL_NULL_SIGNAL_BLOCKER_HPP galera-4-26.4.25/asio/asio/detail/regex_fwd.hpp000644 000164 177776 00000001460 15107057155 022330 0ustar00jenkinsnogroup000000 000000 // // detail/regex_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REGEX_FWD_HPP #define ASIO_DETAIL_REGEX_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if defined(ASIO_HAS_BOOST_REGEX) #include #include namespace boost { template struct sub_match; template class match_results; } // namespace boost #endif // defined(ASIO_HAS_BOOST_REGEX) #endif // ASIO_DETAIL_REGEX_FWD_HPP galera-4-26.4.25/asio/asio/detail/string_view.hpp000644 000164 177776 00000002622 15107057155 022717 0ustar00jenkinsnogroup000000 000000 // // detail/string_view.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STRING_VIEW_HPP #define ASIO_DETAIL_STRING_VIEW_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STRING_VIEW) #if defined(ASIO_HAS_STD_STRING_VIEW) # include #elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) # include #else // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) # error ASIO_HAS_STRING_VIEW is set but no string_view is available #endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) namespace asio { #if defined(ASIO_HAS_STD_STRING_VIEW) using std::basic_string_view; using std::string_view; #elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) using std::experimental::basic_string_view; using std::experimental::string_view; #endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) } // namespace asio # define ASIO_STRING_VIEW_PARAM asio::string_view #else // defined(ASIO_HAS_STRING_VIEW) # define ASIO_STRING_VIEW_PARAM const std::string& #endif // defined(ASIO_HAS_STRING_VIEW) #endif // ASIO_DETAIL_STRING_VIEW_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_handle_service.hpp000644 000164 177776 00000030107 15107057155 025220 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/win_iocp_handle_read_op.hpp" #include "asio/detail/win_iocp_handle_write_op.hpp" #include "asio/detail/win_iocp_io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_handle_service : public execution_context_service_base { public: // The native type of a stream handle. typedef HANDLE native_handle_type; // The implementation type of the stream handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), safe_cancellation_thread_id_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_iocp_handle_service; // The native stream handle representation. native_handle_type handle_; // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the handle. DWORD safe_cancellation_thread_id_; // Pointers to adjacent handle implementations in linked list. 
implementation_type* next_; implementation_type* prev_; }; ASIO_DECL win_iocp_handle_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. ASIO_DECL void move_assign(implementation_type& impl, win_iocp_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE; } // Destroy a handle implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Write the given data. Returns the number of bytes written. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return write_some_at(impl, 0, buffers, ec); } // Write the given data at the specified offset. Returns the number of bytes // written. 
template size_t write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { asio::const_buffer buffer = buffer_sequence_adapter::first(buffers); return do_write(impl, offset, buffer, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op< ConstBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, "handle", &impl, reinterpret_cast(impl.handle_), "async_write_some")); start_write_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous write at a specified offset. The data being written // must be valid for the lifetime of the asynchronous operation. template void async_write_some_at(implementation_type& impl, uint64_t offset, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_write_op< ConstBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, "handle", &impl, reinterpret_cast(impl.handle_), "async_write_some_at")); start_write_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Read some data. Returns the number of bytes received. 
template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return read_some_at(impl, 0, buffers, ec); } // Read some data at a specified offset. Returns the number of bytes received. template size_t read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { asio::mutable_buffer buffer = buffer_sequence_adapter::first(buffers); return do_read(impl, offset, buffer, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_handle_read_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, "handle", &impl, reinterpret_cast(impl.handle_), "async_read_some")); start_read_op(impl, 0, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } // Start an asynchronous read at a specified offset. The buffer for the data // being received must be valid for the lifetime of the asynchronous // operation. template void async_read_some_at(implementation_type& impl, uint64_t offset, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_handle_read_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((iocp_service_.context(), *p.p, "handle", &impl, reinterpret_cast(impl.handle_), "async_read_some_at")); start_read_op(impl, offset, buffer_sequence_adapter::first(buffers), p.p); p.v = p.p = 0; } private: // Prevent the use of the null_buffers type with this service. size_t write_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_write_some(implementation_type& impl, const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex); template void async_write_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex); size_t read_some(implementation_type& impl, const null_buffers& buffers, asio::error_code& ec); size_t read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, asio::error_code& ec); template void async_read_some(implementation_type& impl, const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex); template void async_read_some_at(implementation_type& impl, uint64_t offset, const null_buffers& buffers, Handler& handler, const IoExecutor& io_ex); // Helper class for waiting for synchronous operations to complete. class overlapped_wrapper; // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_write(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, asio::error_code& ec); // Helper function to start a write operation. 
ASIO_DECL void start_write_op(implementation_type& impl, uint64_t offset, const asio::const_buffer& buffer, operation* op); // Helper function to perform a synchronous write operation. ASIO_DECL size_t do_read(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, asio::error_code& ec); // Helper function to start a read operation. ASIO_DECL void start_read_op(implementation_type& impl, uint64_t offset, const asio::mutable_buffer& buffer, operation* op); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id(implementation_type& impl); // Helper function to close a handle when the associated object is being // destroyed. ASIO_DECL void close_for_destruction(implementation_type& impl); // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_context& iocp_service_; // Mutex to protect access to the linked list of implementations. mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/work_dispatcher.hpp000644 000164 177776 00000003533 15107057155 023551 0ustar00jenkinsnogroup000000 000000 // // detail/work_dispatcher.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WORK_DISPATCHER_HPP #define ASIO_DETAIL_WORK_DISPATCHER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_executor.hpp" #include "asio/associated_allocator.hpp" #include "asio/executor_work_guard.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class work_dispatcher { public: template explicit work_dispatcher(ASIO_MOVE_ARG(CompletionHandler) handler) : work_((get_associated_executor)(handler)), handler_(ASIO_MOVE_CAST(CompletionHandler)(handler)) { } #if defined(ASIO_HAS_MOVE) work_dispatcher(const work_dispatcher& other) : work_(other.work_), handler_(other.handler_) { } work_dispatcher(work_dispatcher&& other) : work_(ASIO_MOVE_CAST(executor_work_guard< typename associated_executor::type>)(other.work_)), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { typename associated_allocator::type alloc( (get_associated_allocator)(handler_)); work_.get_executor().dispatch( ASIO_MOVE_CAST(Handler)(handler_), alloc); work_.reset(); } private: executor_work_guard::type> work_; Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WORK_DISPATCHER_HPP galera-4-26.4.25/asio/asio/detail/descriptor_write_op.hpp000644 000164 177776 00000007723 15107057155 024454 0ustar00jenkinsnogroup000000 000000 // // detail/descriptor_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #define ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_work.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class descriptor_write_op_base : public reactor_op { public: descriptor_write_op_base(int descriptor, const ConstBufferSequence& buffers, func_type complete_func) : reactor_op(&descriptor_write_op_base::do_perform, complete_func), descriptor_(descriptor), buffers_(buffers) { } static status do_perform(reactor_op* base) { descriptor_write_op_base* o(static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); status result = descriptor_ops::non_blocking_write(o->descriptor_, bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_) ? 
done : not_done; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_write", o->ec_, o->bytes_transferred_)); return result; } private: int descriptor_; ConstBufferSequence buffers_; }; template class descriptor_write_op : public descriptor_write_op_base { public: ASIO_DEFINE_HANDLER_PTR(descriptor_write_op); descriptor_write_op(int descriptor, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : descriptor_write_op_base( descriptor, buffers, &descriptor_write_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. descriptor_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP galera-4-26.4.25/asio/asio/detail/io_control.hpp000644 000164 177776 00000003404 15107057155 022525 0ustar00jenkinsnogroup000000 000000 // // detail/io_control.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IO_CONTROL_HPP #define ASIO_DETAIL_IO_CONTROL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace io_control { // I/O control command for getting number of bytes available. class bytes_readable { public: // Default constructor. bytes_readable() : value_(0) { } // Construct with a specific command value. bytes_readable(std::size_t value) : value_(static_cast(value)) { } // Get the name of the IO control command. int name() const { return static_cast(ASIO_OS_DEF(FIONREAD)); } // Set the value of the I/O control command. void set(std::size_t value) { value_ = static_cast(value); } // Get the current value of the I/O control command. std::size_t get() const { return static_cast(value_); } // Get the address of the command data. detail::ioctl_arg_type* data() { return &value_; } // Get the address of the command data. 
const detail::ioctl_arg_type* data() const { return &value_; } private: detail::ioctl_arg_type value_; }; } // namespace io_control } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IO_CONTROL_HPP galera-4-26.4.25/asio/asio/detail/handler_tracking.hpp000644 000164 177776 00000017075 15107057155 023666 0ustar00jenkinsnogroup000000 000000 // // detail/handler_tracking.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_TRACKING_HPP #define ASIO_DETAIL_HANDLER_TRACKING_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { class execution_context; } // namespace asio #if defined(ASIO_CUSTOM_HANDLER_TRACKING) # include ASIO_CUSTOM_HANDLER_TRACKING #elif defined(ASIO_ENABLE_HANDLER_TRACKING) # include "asio/error_code.hpp" # include "asio/detail/cstdint.hpp" # include "asio/detail/static_mutex.hpp" # include "asio/detail/tss_ptr.hpp" #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_CUSTOM_HANDLER_TRACKING) // The user-specified header must define the following macros: // - ASIO_INHERIT_TRACKED_HANDLER // - ASIO_ALSO_INHERIT_TRACKED_HANDLER // - ASIO_HANDLER_TRACKING_INIT // - ASIO_HANDLER_CREATION(args) // - ASIO_HANDLER_COMPLETION(args) // - ASIO_HANDLER_INVOCATION_BEGIN(args) // - ASIO_HANDLER_INVOCATION_END // - ASIO_HANDLER_OPERATION(args) // - ASIO_HANDLER_REACTOR_REGISTRATION(args) // - ASIO_HANDLER_REACTOR_DEREGISTRATION(args) // - ASIO_HANDLER_REACTOR_READ_EVENT // - ASIO_HANDLER_REACTOR_WRITE_EVENT // - ASIO_HANDLER_REACTOR_ERROR_EVENT // - 
ASIO_HANDLER_REACTOR_EVENTS(args) // - ASIO_HANDLER_REACTOR_OPERATION(args) # if !defined(ASIO_ENABLE_HANDLER_TRACKING) # define ASIO_ENABLE_HANDLER_TRACKING 1 # endif /// !defined(ASIO_ENABLE_HANDLER_TRACKING) #elif defined(ASIO_ENABLE_HANDLER_TRACKING) class handler_tracking { public: class completion; // Base class for objects containing tracked handlers. class tracked_handler { private: // Only the handler_tracking class will have access to the id. friend class handler_tracking; friend class completion; uint64_t id_; protected: // Constructor initialises with no id. tracked_handler() : id_(0) {} // Prevent deletion through this type. ~tracked_handler() {} }; // Initialise the tracking system. ASIO_DECL static void init(); // Record the creation of a tracked handler. ASIO_DECL static void creation( execution_context& context, tracked_handler& h, const char* object_type, void* object, uintmax_t native_handle, const char* op_name); class completion { public: // Constructor records that handler is to be invoked with no arguments. ASIO_DECL explicit completion(const tracked_handler& h); // Destructor records only when an exception is thrown from the handler, or // if the memory is being freed without the handler having been invoked. ASIO_DECL ~completion(); // Records that handler is to be invoked with no arguments. ASIO_DECL void invocation_begin(); // Records that handler is to be invoked with one arguments. ASIO_DECL void invocation_begin(const asio::error_code& ec); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, std::size_t bytes_transferred); // Constructor records that handler is to be invoked with two arguments. ASIO_DECL void invocation_begin( const asio::error_code& ec, int signal_number); // Constructor records that handler is to be invoked with two arguments. 
ASIO_DECL void invocation_begin( const asio::error_code& ec, const char* arg); // Record that handler invocation has ended. ASIO_DECL void invocation_end(); private: friend class handler_tracking; uint64_t id_; bool invoked_; completion* next_; }; // Record an operation that is not directly associated with a handler. ASIO_DECL static void operation(execution_context& context, const char* object_type, void* object, uintmax_t native_handle, const char* op_name); // Record that a descriptor has been registered with the reactor. ASIO_DECL static void reactor_registration(execution_context& context, uintmax_t native_handle, uintmax_t registration); // Record that a descriptor has been deregistered from the reactor. ASIO_DECL static void reactor_deregistration(execution_context& context, uintmax_t native_handle, uintmax_t registration); // Record a reactor-based operation that is associated with a handler. ASIO_DECL static void reactor_events(execution_context& context, uintmax_t registration, unsigned events); // Record a reactor-based operation that is associated with a handler. ASIO_DECL static void reactor_operation( const tracked_handler& h, const char* op_name, const asio::error_code& ec); // Record a reactor-based operation that is associated with a handler. ASIO_DECL static void reactor_operation( const tracked_handler& h, const char* op_name, const asio::error_code& ec, std::size_t bytes_transferred); // Write a line of output. 
ASIO_DECL static void write_line(const char* format, ...); private: struct tracking_state; ASIO_DECL static tracking_state* get_state(); }; # define ASIO_INHERIT_TRACKED_HANDLER \ : public asio::detail::handler_tracking::tracked_handler # define ASIO_ALSO_INHERIT_TRACKED_HANDLER \ , public asio::detail::handler_tracking::tracked_handler # define ASIO_HANDLER_TRACKING_INIT \ asio::detail::handler_tracking::init() # define ASIO_HANDLER_CREATION(args) \ asio::detail::handler_tracking::creation args # define ASIO_HANDLER_COMPLETION(args) \ asio::detail::handler_tracking::completion tracked_completion args # define ASIO_HANDLER_INVOCATION_BEGIN(args) \ tracked_completion.invocation_begin args # define ASIO_HANDLER_INVOCATION_END \ tracked_completion.invocation_end() # define ASIO_HANDLER_OPERATION(args) \ asio::detail::handler_tracking::operation args # define ASIO_HANDLER_REACTOR_REGISTRATION(args) \ asio::detail::handler_tracking::reactor_registration args # define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) \ asio::detail::handler_tracking::reactor_deregistration args # define ASIO_HANDLER_REACTOR_READ_EVENT 1 # define ASIO_HANDLER_REACTOR_WRITE_EVENT 2 # define ASIO_HANDLER_REACTOR_ERROR_EVENT 4 # define ASIO_HANDLER_REACTOR_EVENTS(args) \ asio::detail::handler_tracking::reactor_events args # define ASIO_HANDLER_REACTOR_OPERATION(args) \ asio::detail::handler_tracking::reactor_operation args #else // defined(ASIO_ENABLE_HANDLER_TRACKING) # define ASIO_INHERIT_TRACKED_HANDLER # define ASIO_ALSO_INHERIT_TRACKED_HANDLER # define ASIO_HANDLER_TRACKING_INIT (void)0 # define ASIO_HANDLER_CREATION(args) (void)0 # define ASIO_HANDLER_COMPLETION(args) (void)0 # define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0 # define ASIO_HANDLER_INVOCATION_END (void)0 # define ASIO_HANDLER_OPERATION(args) (void)0 # define ASIO_HANDLER_REACTOR_REGISTRATION(args) (void)0 # define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) (void)0 # define ASIO_HANDLER_REACTOR_READ_EVENT 0 # define 
ASIO_HANDLER_REACTOR_WRITE_EVENT 0 # define ASIO_HANDLER_REACTOR_ERROR_EVENT 0 # define ASIO_HANDLER_REACTOR_EVENTS(args) (void)0 # define ASIO_HANDLER_REACTOR_OPERATION(args) (void)0 #endif // defined(ASIO_ENABLE_HANDLER_TRACKING) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/handler_tracking.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_HANDLER_TRACKING_HPP galera-4-26.4.25/asio/asio/detail/posix_mutex.hpp000644 000164 177776 00000003023 15107057155 022737 0ustar00jenkinsnogroup000000 000000 // // detail/posix_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_MUTEX_HPP #define ASIO_DETAIL_POSIX_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event; class posix_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL posix_mutex(); // Destructor. ~posix_mutex() { ::pthread_mutex_destroy(&mutex_); // Ignore EBUSY. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. } // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. 
} private: friend class posix_event; ::pthread_mutex_t mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/event.hpp000644 000164 177776 00000002315 15107057155 021477 0ustar00jenkinsnogroup000000 000000 // // detail/event.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EVENT_HPP #define ASIO_DETAIL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_event.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_event.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_event.hpp" #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # include "asio/detail/std_event.hpp" #else # error Only Windows, POSIX and std::condition_variable are supported! #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef null_event event; #elif defined(ASIO_WINDOWS) typedef win_event event; #elif defined(ASIO_HAS_PTHREADS) typedef posix_event event; #elif defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) typedef std_event event; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_EVENT_HPP galera-4-26.4.25/asio/asio/detail/std_thread.hpp000644 000164 177776 00000002464 15107057155 022504 0ustar00jenkinsnogroup000000 000000 // // detail/std_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_THREAD_HPP #define ASIO_DETAIL_STD_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_THREAD) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_thread : private noncopyable { public: // Constructor. template std_thread(Function f, unsigned int = 0) : thread_(f) { } // Destructor. ~std_thread() { join(); } // Wait for the thread to exit. void join() { if (thread_.joinable()) thread_.join(); } // Get number of CPUs. static std::size_t hardware_concurrency() { return std::thread::hardware_concurrency(); } private: std::thread thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_THREAD) #endif // ASIO_DETAIL_STD_THREAD_HPP galera-4-26.4.25/asio/asio/detail/win_static_mutex.hpp000644 000164 177776 00000003403 15107057155 023743 0ustar00jenkinsnogroup000000 000000 // // detail/win_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #define ASIO_DETAIL_WIN_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct win_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. ASIO_DECL void init(); // Initialisation must be performed in a separate function to the "public" // init() function since the compiler does not support the use of structured // exceptions and C++ exceptions in the same function. ASIO_DECL int do_init(); // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } bool initialised_; ::CRITICAL_SECTION crit_section_; }; #if defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0 } } #else // defined(UNDER_CE) # define ASIO_WIN_STATIC_MUTEX_INIT { false, { 0, 0, 0, 0, 0, 0 } } #endif // defined(UNDER_CE) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_static_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_STATIC_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/win_object_handle_service.hpp000644 000164 177776 00000013674 15107057155 025546 0ustar00jenkinsnogroup000000 000000 // // detail/win_object_handle_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #define ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/wait_handler.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_object_handle_service : public execution_context_service_base { public: // The native type of an object handle. typedef HANDLE native_handle_type; // The implementation type of the object handle. class implementation_type { public: // Default constructor. implementation_type() : handle_(INVALID_HANDLE_VALUE), wait_handle_(INVALID_HANDLE_VALUE), owner_(0), next_(0), prev_(0) { } private: // Only this service will have access to the internal values. friend class win_object_handle_service; // The native object handle representation. May be accessed or modified // without locking the mutex. native_handle_type handle_; // The handle used to unregister the wait operation. The mutex must be // locked when accessing or modifying this member. HANDLE wait_handle_; // The operations waiting on the object handle. If there is a registered // wait then the mutex must be locked when accessing or modifying this // member op_queue op_queue_; // The service instance that owns the object handle implementation. win_object_handle_service* owner_; // Pointers to adjacent handle implementations in linked list. 
The mutex // must be locked when accessing or modifying these members. implementation_type* next_; implementation_type* prev_; }; // Constructor. ASIO_DECL win_object_handle_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new handle implementation. ASIO_DECL void construct(implementation_type& impl); // Move-construct a new handle implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another handle implementation. ASIO_DECL void move_assign(implementation_type& impl, win_object_handle_service& other_service, implementation_type& other_impl); // Destroy a handle implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native handle to a handle implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec); // Determine whether the handle is open. bool is_open(const implementation_type& impl) const { return impl.handle_ != INVALID_HANDLE_VALUE && impl.handle_ != 0; } // Destroy a handle implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native handle representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.handle_; } // Cancel all operations associated with the handle. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform a synchronous wait for the object to enter a signalled state. ASIO_DECL void wait(implementation_type& impl, asio::error_code& ec); /// Start an asynchronous wait. template void async_wait(implementation_type& impl, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef wait_handler op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "object_handle", &impl, reinterpret_cast(impl.wait_handle_), "async_wait")); start_wait_op(impl, p.p); p.v = p.p = 0; } private: // Helper function to start an asynchronous wait operation. ASIO_DECL void start_wait_op(implementation_type& impl, wait_op* op); // Helper function to register a wait operation. ASIO_DECL void register_wait_callback( implementation_type& impl, mutex::scoped_lock& lock); // Callback function invoked when the registered wait completes. static ASIO_DECL VOID CALLBACK wait_callback( PVOID param, BOOLEAN timeout); // The scheduler used to post completions. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; // Mutex to protect access to internal state. mutex mutex_; // The head of a linked list of all implementations. implementation_type* impl_list_; // Flag to indicate that the dispatcher has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_object_handle_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) #endif // ASIO_DETAIL_WIN_OBJECT_HANDLE_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/reactive_wait_op.hpp000644 000164 177776 00000005314 15107057155 023704 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_wait_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_WAIT_OP_HPP #define ASIO_DETAIL_REACTIVE_WAIT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_wait_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(reactive_wait_op); reactive_wait_op(Handler& handler, const IoExecutor& io_ex) : reactor_op(&reactive_wait_op::do_perform, &reactive_wait_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static status do_perform(reactor_op*) { return done; } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_wait_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_WAIT_OP_HPP galera-4-26.4.25/asio/asio/detail/reactor_op_queue.hpp000644 000164 177776 00000011352 15107057155 023720 0ustar00jenkinsnogroup000000 000000 // // detail/reactor_op_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #define ASIO_DETAIL_REACTOR_OP_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/hash_map.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactor_op_queue : private noncopyable { public: typedef Descriptor key_type; struct mapped_type : op_queue { mapped_type() {} mapped_type(const mapped_type&) {} void operator=(const mapped_type&) {} }; typedef typename hash_map::value_type value_type; typedef typename hash_map::iterator iterator; // Constructor. reactor_op_queue() : operations_() { } // Obtain iterators to all registered descriptors. iterator begin() { return operations_.begin(); } iterator end() { return operations_.end(); } // Add a new operation to the queue. 
Returns true if this is the only // operation for the given descriptor, in which case the reactor's event // demultiplexing function call may need to be interrupted and restarted. bool enqueue_operation(Descriptor descriptor, reactor_op* op) { std::pair entry = operations_.insert(value_type(descriptor, mapped_type())); entry.first->second.push(op); return entry.second; } // Cancel all operations associated with the descriptor identified by the // supplied iterator. Any operations pending for the descriptor will be // cancelled. Returns true if any operations were cancelled, in which case // the reactor's event demultiplexing function may need to be interrupted and // restarted. bool cancel_operations(iterator i, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { op->ec_ = ec; i->second.pop(); ops.push(op); } operations_.erase(i); return true; } return false; } // Cancel all operations associated with the descriptor. Any operations // pending for the descriptor will be cancelled. Returns true if any // operations were cancelled, in which case the reactor's event // demultiplexing function may need to be interrupted and restarted. bool cancel_operations(Descriptor descriptor, op_queue& ops, const asio::error_code& ec = asio::error::operation_aborted) { return this->cancel_operations(operations_.find(descriptor), ops, ec); } // Whether there are no operations in the queue. bool empty() const { return operations_.empty(); } // Determine whether there are any operations associated with the descriptor. bool has_operation(Descriptor descriptor) const { return operations_.find(descriptor) != operations_.end(); } // Perform the operations corresponding to the descriptor identified by the // supplied iterator. Returns true if there are still unfinished operations // queued for the descriptor. 
bool perform_operations(iterator i, op_queue& ops) { if (i != operations_.end()) { while (reactor_op* op = i->second.front()) { if (op->perform()) { i->second.pop(); ops.push(op); } else { return true; } } operations_.erase(i); } return false; } // Perform the operations corresponding to the descriptor. Returns true if // there are still unfinished operations queued for the descriptor. bool perform_operations(Descriptor descriptor, op_queue& ops) { return this->perform_operations(operations_.find(descriptor), ops); } // Get all operations owned by the queue. void get_all_operations(op_queue& ops) { iterator i = operations_.begin(); while (i != operations_.end()) { iterator op_iter = i++; ops.push(op_iter->second); operations_.erase(op_iter); } } private: // The operations that are currently executing asynchronously. hash_map operations_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_QUEUE_HPP galera-4-26.4.25/asio/asio/detail/socket_ops.hpp000644 000164 177776 00000024627 15107057155 022541 0ustar00jenkinsnogroup000000 000000 // // detail/socket_ops.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPS_HPP #define ASIO_DETAIL_SOCKET_OPS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_ops { // Socket state bits. enum { // The user wants a non-blocking socket. user_set_non_blocking = 1, // The socket has been set non-blocking. 
internal_non_blocking = 2, // Helper "state" used to determine whether the socket is non-blocking. non_blocking = user_set_non_blocking | internal_non_blocking, // User wants connection_aborted errors, which are disabled by default. enable_connection_aborted = 4, // The user set the linger option. Needs to be checked when closing. user_set_linger = 8, // The socket is stream-oriented. stream_oriented = 16, // The socket is datagram-oriented. datagram_oriented = 32, // The socket may have been dup()-ed. possible_dup = 64 }; typedef unsigned char state_type; struct noop_deleter { void operator()(void*) {} }; typedef shared_ptr shared_cancel_token_type; typedef weak_ptr weak_cancel_token_type; #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL socket_type accept(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL socket_type sync_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_accept(socket_type s, void* output_buffer, DWORD address_length, socket_addr_type* addr, std::size_t* addrlen, socket_type new_socket, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_accept(socket_type s, state_type state, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, socket_type& new_socket); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL int bind(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL int close(socket_type s, state_type& state, bool destruction, asio::error_code& ec); ASIO_DECL bool set_user_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL bool set_internal_non_blocking(socket_type s, state_type& state, bool value, asio::error_code& ec); ASIO_DECL int shutdown(socket_type s, int what, asio::error_code& ec); ASIO_DECL int connect(socket_type s, const socket_addr_type* addr, std::size_t 
addrlen, asio::error_code& ec); ASIO_DECL void sync_connect(socket_type s, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_connect(socket_type s, asio::error_code& ec); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_connect(socket_type s, asio::error_code& ec); ASIO_DECL int socketpair(int af, int type, int protocol, socket_type sv[2], asio::error_code& ec); ASIO_DECL bool sockatmark(socket_type s, asio::error_code& ec); ASIO_DECL size_t available(socket_type s, asio::error_code& ec); ASIO_DECL int listen(socket_type s, int backlog, asio::error_code& ec); #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef WSABUF buf; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef iovec buf; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) ASIO_DECL void init_buf(buf& b, void* data, size_t size); ASIO_DECL void init_buf(buf& b, const void* data, size_t size); ASIO_DECL signed_size_type recv(socket_type s, buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_recv(socket_type s, state_type state, buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recv(state_type state, const weak_cancel_token_type& cancel_token, bool all_empty, asio::error_code& ec, size_t bytes_transferred); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recv(socket_type s, buf* bufs, size_t count, int flags, bool is_stream, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL size_t sync_recvfrom(socket_type s, state_type state, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void 
complete_iocp_recvfrom( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvfrom(socket_type s, buf* bufs, size_t count, int flags, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); ASIO_DECL size_t sync_recvmsg(socket_type s, state_type state, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_recvmsg( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_recvmsg(socket_type s, buf* bufs, size_t count, int in_flags, int& out_flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec); ASIO_DECL size_t sync_send(socket_type s, state_type state, const buf* bufs, size_t count, int flags, bool all_empty, asio::error_code& ec); #if defined(ASIO_HAS_IOCP) ASIO_DECL void complete_iocp_send( const weak_cancel_token_type& cancel_token, asio::error_code& ec); #else // defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_send(socket_type s, const buf* bufs, size_t count, int flags, asio::error_code& ec, size_t& bytes_transferred); #endif // defined(ASIO_HAS_IOCP) ASIO_DECL signed_size_type sendto(socket_type s, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); ASIO_DECL size_t sync_sendto(socket_type s, state_type state, const buf* bufs, size_t count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec); #if !defined(ASIO_HAS_IOCP) ASIO_DECL bool non_blocking_sendto(socket_type s, const buf* bufs, size_t 
count, int flags, const socket_addr_type* addr, std::size_t addrlen, asio::error_code& ec, size_t& bytes_transferred); #endif // !defined(ASIO_HAS_IOCP) ASIO_DECL socket_type socket(int af, int type, int protocol, asio::error_code& ec); ASIO_DECL int setsockopt(socket_type s, state_type& state, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); ASIO_DECL int getsockopt(socket_type s, state_type state, int level, int optname, void* optval, size_t* optlen, asio::error_code& ec); ASIO_DECL int getpeername(socket_type s, socket_addr_type* addr, std::size_t* addrlen, bool cached, asio::error_code& ec); ASIO_DECL int getsockname(socket_type s, socket_addr_type* addr, std::size_t* addrlen, asio::error_code& ec); ASIO_DECL int ioctl(socket_type s, state_type& state, int cmd, ioctl_arg_type* arg, asio::error_code& ec); ASIO_DECL int select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, timeval* timeout, asio::error_code& ec); ASIO_DECL int poll_read(socket_type s, state_type state, int msec, asio::error_code& ec); ASIO_DECL int poll_write(socket_type s, state_type state, int msec, asio::error_code& ec); ASIO_DECL int poll_error(socket_type s, state_type state, int msec, asio::error_code& ec); ASIO_DECL int poll_connect(socket_type s, int msec, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL const char* inet_ntop(int af, const void* src, char* dest, size_t length, unsigned long scope_id, asio::error_code& ec); ASIO_DECL int inet_pton(int af, const char* src, void* dest, unsigned long* scope_id, asio::error_code& ec); ASIO_DECL int gethostname(char* name, int namelen, asio::error_code& ec); #if !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL asio::error_code getaddrinfo(const char* host, const char* service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL asio::error_code background_getaddrinfo( const weak_cancel_token_type& cancel_token, const char* host, const char* 
service, const addrinfo_type& hints, addrinfo_type** result, asio::error_code& ec); ASIO_DECL void freeaddrinfo(addrinfo_type* ai); ASIO_DECL asio::error_code getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int flags, asio::error_code& ec); ASIO_DECL asio::error_code sync_getnameinfo( const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); ASIO_DECL asio::error_code background_getnameinfo( const weak_cancel_token_type& cancel_token, const socket_addr_type* addr, std::size_t addrlen, char* host, std::size_t hostlen, char* serv, std::size_t servlen, int sock_type, asio::error_code& ec); #endif // !defined(ASIO_WINDOWS_RUNTIME) ASIO_DECL u_long_type network_to_host_long(u_long_type value); ASIO_DECL u_long_type host_to_network_long(u_long_type value); ASIO_DECL u_short_type network_to_host_short(u_short_type value); ASIO_DECL u_short_type host_to_network_short(u_short_type value); } // namespace socket_ops } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_ops.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SOCKET_OPS_HPP galera-4-26.4.25/asio/asio/detail/null_event.hpp000644 000164 177776 00000003616 15107057155 022536 0ustar00jenkinsnogroup000000 000000 // // detail/null_event.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_EVENT_HPP #define ASIO_DETAIL_NULL_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_event : private noncopyable { public: // Constructor. null_event() { } // Destructor. ~null_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock&) { } // Signal all waiters. template void signal_all(Lock&) { } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock&) { } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock&) { return false; } // Reset the event. template void clear(Lock&) { } // Wait for the event to become signalled. template void wait(Lock&) { do_wait(); } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock&, long usec) { do_wait_for_usec(usec); return true; } private: ASIO_DECL static void do_wait(); ASIO_DECL static void do_wait_for_usec(long usec); }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/null_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_NULL_EVENT_HPP galera-4-26.4.25/asio/asio/detail/resolve_endpoint_op.hpp000644 000164 177776 00000010701 15107057155 024431 0ustar00jenkinsnogroup000000 000000 // // detail/resolve_endpoint_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #define ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/resolve_op.hpp" #include "asio/detail/socket_ops.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_endpoint_op : public resolve_op { public: ASIO_DEFINE_HANDLER_PTR(resolve_endpoint_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_results results_type; #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif resolve_endpoint_op(socket_ops::weak_cancel_token_type cancel_token, const endpoint_type& endpoint, scheduler_impl& sched, Handler& handler, const IoExecutor& io_ex) : resolve_op(&resolve_endpoint_op::do_complete), cancel_token_(cancel_token), endpoint_(endpoint), scheduler_(sched), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. 
resolve_endpoint_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); if (owner && owner != &o->scheduler_) { // The operation is being run on the worker io_context. Time to perform // the resolver operation. // Perform the blocking endpoint resolution operation. char host_name[NI_MAXHOST]; char service_name[NI_MAXSERV]; socket_ops::background_getnameinfo(o->cancel_token_, o->endpoint_.data(), o->endpoint_.size(), host_name, NI_MAXHOST, service_name, NI_MAXSERV, o->endpoint_.protocol().type(), o->ec_); o->results_ = results_type::create(o->endpoint_, host_name, service_name); // Pass operation back to main io_context for completion. o->scheduler_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_context. The completion // handler is ready to be delivered. ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. 
detail::binder2 handler(o->handler_, o->ec_, o->results_); p.h = asio::detail::addressof(handler.handler_); p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; endpoint_type endpoint_; scheduler_impl& scheduler_; Handler handler_; IoExecutor io_executor_; results_type results_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVER_ENDPOINT_OP_HPP galera-4-26.4.25/asio/asio/detail/gcc_x86_fenced_block.hpp000644 000164 177776 00000004513 15107057155 024277 0ustar00jenkinsnogroup000000 000000 // // detail/gcc_x86_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_x86_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_x86_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_x86_fenced_block(full_t) { lbarrier(); } // Destructor. 
~gcc_x86_fenced_block() { sbarrier(); } private: static int barrier() { int r = 0, m = 1; __asm__ __volatile__ ( "xchgl %0, %1" : "=r"(r), "=m"(m) : "0"(1), "m"(m) : "memory", "cc"); return r; } static void lbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_lfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("lfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } static void sbarrier() { #if defined(__SSE2__) # if (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __builtin_ia32_sfence(); # else // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) __asm__ __volatile__ ("sfence" ::: "memory"); # endif // (__GNUC__ >= 4) && !defined(__INTEL_COMPILER) && !defined(__ICL) #else // defined(__SSE2__) barrier(); #endif // defined(__SSE2__) } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) #endif // ASIO_DETAIL_GCC_X86_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/win_mutex.hpp000644 000164 177776 00000003232 15107057155 022374 0ustar00jenkinsnogroup000000 000000 // // detail/win_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_MUTEX_HPP #define ASIO_DETAIL_WIN_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. ASIO_DECL win_mutex(); // Destructor. ~win_mutex() { ::DeleteCriticalSection(&crit_section_); } // Lock the mutex. void lock() { ::EnterCriticalSection(&crit_section_); } // Unlock the mutex. void unlock() { ::LeaveCriticalSection(&crit_section_); } private: // Initialisation must be performed in a separate function to the constructor // since the compiler does not support the use of structured exceptions and // C++ exceptions in the same function. ASIO_DECL int do_init(); ::CRITICAL_SECTION crit_section_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_mutex.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_service.hpp000644 000164 177776 00000046254 15107057155 025267 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/socket_base.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_reactor.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_context.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_accept_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_recvfrom_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_service : public execution_context_service_base >, public win_iocp_socket_service_base { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. 
class native_handle_type { public: native_handle_type(socket_type s) : socket_(s), have_remote_endpoint_(false) { } native_handle_type(socket_type s, const endpoint_type& ep) : socket_(s), have_remote_endpoint_(true), remote_endpoint_(ep) { } void operator=(socket_type s) { socket_ = s; have_remote_endpoint_ = false; remote_endpoint_ = endpoint_type(); } operator socket_type() const { return socket_; } bool have_remote_endpoint() const { return have_remote_endpoint_; } endpoint_type remote_endpoint() const { return remote_endpoint_; } private: socket_type socket_; bool have_remote_endpoint_; endpoint_type remote_endpoint_; }; // The implementation type of the socket. struct implementation_type : win_iocp_socket_service_base::base_implementation_type { // Default constructor. implementation_type() : protocol_(endpoint_type().protocol()), have_remote_endpoint_(false), remote_endpoint_() { } // The protocol associated with the socket. protocol_type protocol_; // Whether we have a cached remote endpoint. bool have_remote_endpoint_; // A cached remote endpoint. endpoint_type remote_endpoint_; }; // Constructor. win_iocp_socket_service(execution_context& context) : execution_context_service_base< win_iocp_socket_service >(context), win_iocp_socket_service_base(context) { } // Destroy all user-defined handler objects owned by the service. void shutdown() { this->base_shutdown(); } // Move-construct a new socket implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-assign from another socket implementation. 
void move_assign(implementation_type& impl, win_iocp_socket_service_base& other_service, implementation_type& other_impl) { this->base_move_assign(impl, other_service, other_impl); impl.protocol_ = other_impl.protocol_; other_impl.protocol_ = endpoint_type().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = endpoint_type(); } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type& impl, win_iocp_socket_service&, typename win_iocp_socket_service< Protocol1>::implementation_type& other_impl) { this->base_move_construct(impl, other_impl); impl.protocol_ = protocol_type(other_impl.protocol_); other_impl.protocol_ = typename Protocol1::endpoint().protocol(); impl.have_remote_endpoint_ = other_impl.have_remote_endpoint_; other_impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = other_impl.remote_endpoint_; other_impl.remote_endpoint_ = typename Protocol1::endpoint(); } // Open a new socket implementation. asio::error_code open(implementation_type& impl, const protocol_type& protocol, asio::error_code& ec) { if (!do_open(impl, protocol.family(), protocol.type(), protocol.protocol(), ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = false; impl.remote_endpoint_ = endpoint_type(); } return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type& impl, const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { if (!do_assign(impl, protocol.type(), native_socket, ec)) { impl.protocol_ = protocol; impl.have_remote_endpoint_ = native_socket.have_remote_endpoint(); impl.remote_endpoint_ = native_socket.remote_endpoint(); } return ec; } // Get the native socket representation. 
native_handle_type native_handle(implementation_type& impl) { if (impl.have_remote_endpoint_) return native_handle_type(impl.socket_, impl.remote_endpoint_); return native_handle_type(impl.socket_); } // Bind the socket to the specified local endpoint. asio::error_code bind(implementation_type& impl, const endpoint_type& endpoint, asio::error_code& ec) { socket_ops::bind(impl.socket_, endpoint.data(), endpoint.size(), ec); return ec; } // Set a socket option. template asio::error_code set_option(implementation_type& impl, const Option& option, asio::error_code& ec) { socket_ops::setsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), option.size(impl.protocol_), ec); return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type& impl, Option& option, asio::error_code& ec) const { std::size_t size = option.size(impl.protocol_); socket_ops::getsockopt(impl.socket_, impl.state_, option.level(impl.protocol_), option.name(impl.protocol_), option.data(impl.protocol_), &size, ec); if (!ec) option.resize(impl.protocol_, size); return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getsockname(impl.socket_, endpoint.data(), &addr_len, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type& impl, asio::error_code& ec) const { endpoint_type endpoint = impl.remote_endpoint_; std::size_t addr_len = endpoint.capacity(); if (socket_ops::getpeername(impl.socket_, endpoint.data(), &addr_len, impl.have_remote_endpoint_, ec)) return endpoint_type(); endpoint.resize(addr_len); return endpoint; } // Disable sends or receives on the socket. 
asio::error_code shutdown(base_implementation_type& impl, socket_base::shutdown_type what, asio::error_code& ec) { socket_ops::shutdown(impl.socket_, what, ec); return ec; } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template size_t send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_sendto(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, destination.data(), destination.size(), ec); } // Wait until data can be sent without blocking. size_t send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type& impl, const ConstBufferSequence& buffers, const endpoint_type& destination, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_send_op< ConstBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_send_to")); buffer_sequence_adapter bufs(buffers); start_send_to_op(impl, bufs.buffers(), bufs.count(), destination.data(), static_cast(destination.size()), flags, p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. 
template void async_send_to(implementation_type& impl, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_send_to(null_buffers)")); start_reactor_op(impl, select_reactor::write_op, p.p); p.v = p.p = 0; } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template size_t receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endpoint, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); std::size_t addr_len = sender_endpoint.capacity(); std::size_t bytes_recvd = socket_ops::sync_recvfrom( impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, sender_endpoint.data(), &addr_len, ec); if (!ec) sender_endpoint.resize(addr_len); return bytes_recvd; } // Wait until data can be received without blocking. size_t receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. 
template void async_receive_from(implementation_type& impl, const MutableBufferSequence& buffers, endpoint_type& sender_endp, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recvfrom_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(sender_endp, impl.cancel_token_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive_from")); buffer_sequence_adapter bufs(buffers); start_receive_from_op(impl, bufs.buffers(), bufs.count(), sender_endp.data(), flags, &p.p->endpoint_size(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_from(implementation_type& impl, const null_buffers&, endpoint_type& sender_endpoint, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive_from(null_buffers)")); // Reset endpoint since it can be given no sensible value at this time. sender_endpoint = endpoint_type(); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Accept a new connection. template asio::error_code accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, asio::error_code& ec) { // We cannot accept a socket that is already open. if (peer.is_open()) { ec = asio::error::already_open; return ec; } std::size_t addr_len = peer_endpoint ? peer_endpoint->capacity() : 0; socket_holder new_socket(socket_ops::sync_accept(impl.socket_, impl.state_, peer_endpoint ? peer_endpoint->data() : 0, peer_endpoint ? 
&addr_len : 0, ec)); // On success, assign new connection to peer socket object. if (new_socket.get() != invalid_socket) { if (peer_endpoint) peer_endpoint->resize(addr_len); peer.assign(impl.protocol_, new_socket.get(), ec); if (!ec) new_socket.release(); } return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type& impl, Socket& peer, endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_accept_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; bool enable_connection_aborted = (impl.state_ & socket_ops::enable_connection_aborted) != 0; p.p = new (p.v) op(*this, impl.socket_, peer, impl.protocol_, peer_endpoint, enable_connection_aborted, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_accept")); start_accept_op(impl, peer.is_open(), p.p->new_socket(), impl.protocol_.family(), impl.protocol_.type(), impl.protocol_.protocol(), p.p->output_buffer(), p.p->address_length(), p.p); p.v = p.p = 0; } #if defined(ASIO_HAS_MOVE) // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_move_accept(implementation_type& impl, const PeerIoExecutor& peer_io_ex, endpoint_type* peer_endpoint, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_move_accept_op< protocol_type, PeerIoExecutor, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; bool enable_connection_aborted = (impl.state_ & socket_ops::enable_connection_aborted) != 0; p.p = new (p.v) op(*this, impl.socket_, impl.protocol_, peer_io_ex, peer_endpoint, enable_connection_aborted, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_accept")); start_accept_op(impl, false, p.p->new_socket(), impl.protocol_.family(), impl.protocol_.type(), impl.protocol_.protocol(), p.p->output_buffer(), p.p->address_length(), p.p); p.v = p.p = 0; } #endif // defined(ASIO_HAS_MOVE) // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type& impl, const endpoint_type& peer_endpoint, asio::error_code& ec) { socket_ops::sync_connect(impl.socket_, peer_endpoint.data(), peer_endpoint.size(), ec); return ec; } // Start an asynchronous connect. template void async_connect(implementation_type& impl, const endpoint_type& peer_endpoint, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_connect_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_connect")); start_connect_op(impl, impl.protocol_.family(), impl.protocol_.type(), peer_endpoint.data(), static_cast(peer_endpoint.size()), p.p); p.v = p.p = 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_accept_op.hpp000644 000164 177776 00000023040 15107057155 025550 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/win_iocp_socket_service_base.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_accept_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_accept_op); 
win_iocp_socket_accept_op(win_iocp_socket_service_base& socket_service, socket_type socket, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, bool enable_connection_aborted, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_accept_op::do_complete), socket_service_(socket_service), socket_(socket), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint), enable_connection_aborted_(enable_connection_aborted), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } socket_holder& new_socket() { return new_socket_; } void* output_buffer() { return output_buffer_; } DWORD address_length() { return sizeof(sockaddr_storage_type) + 16; } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); if (owner) { typename Protocol::endpoint peer_endpoint; std::size_t addr_len = peer_endpoint.capacity(); socket_ops::complete_iocp_accept(o->socket_, o->output_buffer(), o->address_length(), peer_endpoint.data(), &addr_len, o->new_socket_.get(), ec); // Restart the accept operation if we got the connection_aborted error // and the enable_connection_aborted socket option is not set. if (ec == asio::error::connection_aborted && !o->enable_connection_aborted_) { o->reset(); o->socket_service_.restart_accept_op(o->socket_, o->new_socket_, o->protocol_.family(), o->protocol_.type(), o->protocol_.protocol(), o->output_buffer(), o->address_length(), o); p.v = p.p = 0; return; } // If the socket was successfully accepted, transfer ownership of the // socket to the peer object. 
if (!ec) { o->peer_.assign(o->protocol_, typename Socket::native_handle_type( o->new_socket_.get(), peer_endpoint), ec); if (!ec) o->new_socket_.release(); } // Pass endpoint back to caller. if (o->peer_endpoint_) *o->peer_endpoint_ = peer_endpoint; } ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: win_iocp_socket_service_base& socket_service_; socket_type socket_; socket_holder new_socket_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2]; bool enable_connection_aborted_; Handler handler_; IoExecutor io_executor_; }; #if defined(ASIO_HAS_MOVE) template class win_iocp_socket_move_accept_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_move_accept_op); win_iocp_socket_move_accept_op( win_iocp_socket_service_base& socket_service, socket_type socket, const Protocol& protocol, const PeerIoExecutor& peer_io_ex, typename Protocol::endpoint* peer_endpoint, bool enable_connection_aborted, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_move_accept_op::do_complete), socket_service_(socket_service), socket_(socket), peer_(peer_io_ex), protocol_(protocol), peer_endpoint_(peer_endpoint), enable_connection_aborted_(enable_connection_aborted), 
handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } socket_holder& new_socket() { return new_socket_; } void* output_buffer() { return output_buffer_; } DWORD address_length() { return sizeof(sockaddr_storage_type) + 16; } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_move_accept_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); if (owner) { typename Protocol::endpoint peer_endpoint; std::size_t addr_len = peer_endpoint.capacity(); socket_ops::complete_iocp_accept(o->socket_, o->output_buffer(), o->address_length(), peer_endpoint.data(), &addr_len, o->new_socket_.get(), ec); // Restart the accept operation if we got the connection_aborted error // and the enable_connection_aborted socket option is not set. if (ec == asio::error::connection_aborted && !o->enable_connection_aborted_) { o->reset(); o->socket_service_.restart_accept_op(o->socket_, o->new_socket_, o->protocol_.family(), o->protocol_.type(), o->protocol_.protocol(), o->output_buffer(), o->address_length(), o); p.v = p.p = 0; return; } // If the socket was successfully accepted, transfer ownership of the // socket to the peer object. if (!ec) { o->peer_.assign(o->protocol_, typename Protocol::socket::native_handle_type( o->new_socket_.get(), peer_endpoint), ec); if (!ec) o->new_socket_.release(); } // Pass endpoint back to caller. if (o->peer_endpoint_) *o->peer_endpoint_ = peer_endpoint; } ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::move_binder2 handler(0, ASIO_MOVE_CAST(Handler)(o->handler_), ec, ASIO_MOVE_CAST(peer_socket_type)(o->peer_)); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: typedef typename Protocol::socket::template rebind_executor::other peer_socket_type; win_iocp_socket_service_base& socket_service_; socket_type socket_; socket_holder new_socket_; peer_socket_type peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; unsigned char output_buffer_[(sizeof(sockaddr_storage_type) + 16) * 2]; bool enable_connection_aborted_; Handler handler_; IoExecutor io_executor_; }; #endif // defined(ASIO_HAS_MOVE) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_ACCEPT_OP_HPP galera-4-26.4.25/asio/asio/detail/reactor_op.hpp000644 000164 177776 00000003077 15107057155 022521 0ustar00jenkinsnogroup000000 000000 // // detail/reactor_op.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_OP_HPP #define ASIO_DETAIL_REACTOR_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactor_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The number of bytes transferred, to be passed to the completion handler. std::size_t bytes_transferred_; // Status returned by perform function. May be used to decide whether it is // worth performing more operations on the descriptor immediately. enum status { not_done, done, done_and_exhausted }; // Perform the operation. Returns true if it is finished. status perform() { return perform_func_(this); } protected: typedef status (*perform_func_type)(reactor_op*); reactor_op(perform_func_type perform_func, func_type complete_func) : operation(complete_func), bytes_transferred_(0), perform_func_(perform_func) { } private: perform_func_type perform_func_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTOR_OP_HPP galera-4-26.4.25/asio/asio/detail/local_free_on_block_exit.hpp000644 000164 177776 00000002517 15107057155 025354 0ustar00jenkinsnogroup000000 000000 // // detail/local_free_on_block_exit.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #define ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #if !defined(ASIO_WINDOWS_APP) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class local_free_on_block_exit : private noncopyable { public: // Constructor blocks all signals for the calling thread. explicit local_free_on_block_exit(void* p) : p_(p) { } // Destructor restores the previous signal mask. ~local_free_on_block_exit() { ::LocalFree(p_); } private: void* p_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS_APP) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_LOCAL_FREE_ON_BLOCK_EXIT_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_handle_read_op.hpp000644 000164 177776 00000007033 15107057155 025173 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_handle_read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_read_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_read_op); win_iocp_handle_read_op(const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_handle_read_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_handle_read_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Map non-portable errors to their portable counterparts. if (ec.value() == ERROR_HANDLE_EOF) ec = asio::error::eof; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. 
Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_READ_OP_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_wait_op.hpp000644 000164 177776 00000007061 15107057155 023712 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_wait_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP #define ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_wait_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_wait_op); win_iocp_wait_op(socket_ops::weak_cancel_token_type cancel_token, Handler& handler, const IoExecutor& io_ex) : reactor_op(&win_iocp_wait_op::do_perform, &win_iocp_wait_op::do_complete), cancel_token_(cancel_token), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static status do_perform(reactor_op*) { return done; } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t /*bytes_transferred*/) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_wait_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // The reactor may have stored a result in the operation object. if (o->ec_) ec = o->ec_; // Map non-portable errors to their portable counterparts. 
if (ec.value() == ERROR_NETNAME_DELETED) { if (o->cancel_token_.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, ec); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_WAIT_OP_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_accept_op.hpp000644 000164 177776 00000016701 15107057155 025551 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_accept_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_accept_op_base : public reactor_op { public: reactive_socket_accept_op_base(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, func_type complete_func) : reactor_op(&reactive_socket_accept_op_base::do_perform, complete_func), socket_(socket), state_(state), peer_(peer), protocol_(protocol), peer_endpoint_(peer_endpoint), addrlen_(peer_endpoint ? peer_endpoint->capacity() : 0) { } static status do_perform(reactor_op* base) { reactive_socket_accept_op_base* o( static_cast(base)); socket_type new_socket = invalid_socket; status result = socket_ops::non_blocking_accept(o->socket_, o->state_, o->peer_endpoint_ ? o->peer_endpoint_->data() : 0, o->peer_endpoint_ ? &o->addrlen_ : 0, o->ec_, new_socket) ? 
done : not_done; o->new_socket_.reset(new_socket); ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_accept", o->ec_)); return result; } void do_assign() { if (new_socket_.get() != invalid_socket) { if (peer_endpoint_) peer_endpoint_->resize(addrlen_); peer_.assign(protocol_, new_socket_.get(), ec_); if (!ec_) new_socket_.release(); } } private: socket_type socket_; socket_ops::state_type state_; socket_holder new_socket_; Socket& peer_; Protocol protocol_; typename Protocol::endpoint* peer_endpoint_; std::size_t addrlen_; }; template class reactive_socket_accept_op : public reactive_socket_accept_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_accept_op); reactive_socket_accept_op(socket_type socket, socket_ops::state_type state, Socket& peer, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, Handler& handler, const IoExecutor& io_ex) : reactive_socket_accept_op_base(socket, state, peer, protocol, peer_endpoint, &reactive_socket_accept_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_accept_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); // On success, assign new connection to peer socket object. if (owner) o->do_assign(); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. 
detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; #if defined(ASIO_HAS_MOVE) template class reactive_socket_move_accept_op : private Protocol::socket::template rebind_executor::other, public reactive_socket_accept_op_base< typename Protocol::socket::template rebind_executor::other, Protocol> { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_move_accept_op); reactive_socket_move_accept_op(const PeerIoExecutor& peer_io_ex, socket_type socket, socket_ops::state_type state, const Protocol& protocol, typename Protocol::endpoint* peer_endpoint, Handler& handler, const IoExecutor& io_ex) : peer_socket_type(peer_io_ex), reactive_socket_accept_op_base( socket, state, *this, protocol, peer_endpoint, &reactive_socket_move_accept_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_move_accept_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); // On success, assign new connection to peer socket object. if (owner) o->do_assign(); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::move_binder2 handler(0, ASIO_MOVE_CAST(Handler)(o->handler_), o->ec_, ASIO_MOVE_CAST(peer_socket_type)(*o)); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: typedef typename Protocol::socket::template rebind_executor::other peer_socket_type; Handler handler_; IoExecutor io_executor_; }; #endif // defined(ASIO_HAS_MOVE) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_ACCEPT_OP_HPP galera-4-26.4.25/asio/asio/detail/atomic_count.hpp000644 000164 177776 00000002447 15107057155 023050 0ustar00jenkinsnogroup000000 000000 // // detail/atomic_count.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ATOMIC_COUNT_HPP #define ASIO_DETAIL_ATOMIC_COUNT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) // Nothing to include. 
#elif defined(ASIO_HAS_STD_ATOMIC) # include #else // defined(ASIO_HAS_STD_ATOMIC) # include #endif // defined(ASIO_HAS_STD_ATOMIC) namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) typedef long atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #elif defined(ASIO_HAS_STD_ATOMIC) typedef std::atomic atomic_count; inline void increment(atomic_count& a, long b) { a += b; } #else // defined(ASIO_HAS_STD_ATOMIC) typedef boost::detail::atomic_count atomic_count; inline void increment(atomic_count& a, long b) { while (b > 0) ++a, --b; } #endif // defined(ASIO_HAS_STD_ATOMIC) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ATOMIC_COUNT_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_recv_op.hpp000644 000164 177776 00000010601 15107057155 025242 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recv_op_base : public reactor_op { public: reactive_socket_recv_op_base(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recv_op_base::do_perform, complete_func), socket_(socket), state_(state), buffers_(buffers), flags_(flags) { } static status do_perform(reactor_op* base) { reactive_socket_recv_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); status result = socket_ops::non_blocking_recv(o->socket_, bufs.buffers(), bufs.count(), o->flags_, (o->state_ & socket_ops::stream_oriented) != 0, o->ec_, o->bytes_transferred_) ? 
done : not_done; if (result == done) if ((o->state_ & socket_ops::stream_oriented) != 0) if (o->bytes_transferred_ == 0) result = done_and_exhausted; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_recv", o->ec_, o->bytes_transferred_)); return result; } private: socket_type socket_; socket_ops::state_type state_; MutableBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_recv_op : public reactive_socket_recv_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recv_op); reactive_socket_recv_op(socket_type socket, socket_ops::state_type state, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) : reactive_socket_recv_op_base(socket, state, buffers, flags, &reactive_socket_recv_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECV_OP_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_recvmsg_op.hpp000644 000164 177776 00000010545 15107057155 025760 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvmsg_op_base : public reactor_op { public: reactive_socket_recvmsg_op_base(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, func_type complete_func) : reactor_op(&reactive_socket_recvmsg_op_base::do_perform, complete_func), socket_(socket), buffers_(buffers), in_flags_(in_flags), out_flags_(out_flags) { } static status do_perform(reactor_op* base) { reactive_socket_recvmsg_op_base* o( static_cast(base)); buffer_sequence_adapter 
bufs(o->buffers_); status result = socket_ops::non_blocking_recvmsg(o->socket_, bufs.buffers(), bufs.count(), o->in_flags_, o->out_flags_, o->ec_, o->bytes_transferred_) ? done : not_done; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_recvmsg", o->ec_, o->bytes_transferred_)); return result; } private: socket_type socket_; MutableBufferSequence buffers_; socket_base::message_flags in_flags_; socket_base::message_flags& out_flags_; }; template class reactive_socket_recvmsg_op : public reactive_socket_recvmsg_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvmsg_op); reactive_socket_recvmsg_op(socket_type socket, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) : reactive_socket_recvmsg_op_base(socket, buffers, in_flags, out_flags, &reactive_socket_recvmsg_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVMSG_OP_HPP galera-4-26.4.25/asio/asio/detail/posix_signal_blocker.hpp000644 000164 177776 00000003532 15107057155 024560 0ustar00jenkinsnogroup000000 000000 // // detail/posix_signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_signal_blocker : private noncopyable { public: // Constructor blocks all signals for the calling thread. posix_signal_blocker() : blocked_(false) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } // Destructor restores the previous signal mask. ~posix_signal_blocker() { if (blocked_) pthread_sigmask(SIG_SETMASK, &old_mask_, 0); } // Block all signals for the calling thread. void block() { if (!blocked_) { sigset_t new_mask; sigfillset(&new_mask); blocked_ = (pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask_) == 0); } } // Restore the previous signal mask. 
void unblock() { if (blocked_) blocked_ = (pthread_sigmask(SIG_SETMASK, &old_mask_, 0) != 0); } private: // Have signals been blocked. bool blocked_; // The previous signal mask. sigset_t old_mask_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_SIGNAL_BLOCKER_HPP galera-4-26.4.25/asio/asio/detail/signal_blocker.hpp000644 000164 177776 00000002353 15107057155 023336 0ustar00jenkinsnogroup000000 000000 // // detail/signal_blocker.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_BLOCKER_HPP #define ASIO_DETAIL_SIGNAL_BLOCKER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/null_signal_blocker.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_signal_blocker.hpp" #else # error Only Windows and POSIX are supported! 
#endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) || defined(ASIO_WINDOWS) \ || defined(ASIO_WINDOWS_RUNTIME) \ || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef null_signal_blocker signal_blocker; #elif defined(ASIO_HAS_PTHREADS) typedef posix_signal_blocker signal_blocker; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_SIGNAL_BLOCKER_HPP galera-4-26.4.25/asio/asio/detail/pipe_select_interrupter.hpp000644 000164 177776 00000004576 15107057155 025330 0ustar00jenkinsnogroup000000 000000 // // detail/pipe_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) #if !defined(ASIO_WINDOWS_RUNTIME) #if !defined(__CYGWIN__) #if !defined(__SYMBIAN32__) #if !defined(ASIO_HAS_EVENTFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class pipe_select_interrupter { public: // Constructor. ASIO_DECL pipe_select_interrupter(); // Destructor. ASIO_DECL ~pipe_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. int read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. 
ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. int read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. int write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/pipe_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_EVENTFD) #endif // !defined(__SYMBIAN32__) #endif // !defined(__CYGWIN__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_PIPE_SELECT_INTERRUPTER_HPP galera-4-26.4.25/asio/asio/detail/concurrency_hint.hpp000644 000164 177776 00000007422 15107057155 023736 0ustar00jenkinsnogroup000000 000000 // // detail/concurrency_hint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONCURRENCY_HINT_HPP #define ASIO_DETAIL_CONCURRENCY_HINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" // The concurrency hint ID and mask are used to identify when a "well-known" // concurrency hint value has been passed to the io_context. #define ASIO_CONCURRENCY_HINT_ID 0xA5100000u #define ASIO_CONCURRENCY_HINT_ID_MASK 0xFFFF0000u // If set, this bit indicates that the scheduler should perform locking. 
#define ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER 0x1u // If set, this bit indicates that the reactor should perform locking when // managing descriptor registrations. #define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION 0x2u // If set, this bit indicates that the reactor should perform locking for I/O. #define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO 0x4u // Helper macro to determine if we have a special concurrency hint. #define ASIO_CONCURRENCY_HINT_IS_SPECIAL(hint) \ ((static_cast(hint) \ & ASIO_CONCURRENCY_HINT_ID_MASK) \ == ASIO_CONCURRENCY_HINT_ID) // Helper macro to determine if locking is enabled for a given facility. #define ASIO_CONCURRENCY_HINT_IS_LOCKING(facility, hint) \ (((static_cast(hint) \ & (ASIO_CONCURRENCY_HINT_ID_MASK \ | ASIO_CONCURRENCY_HINT_LOCKING_ ## facility)) \ ^ ASIO_CONCURRENCY_HINT_ID) != 0) // This special concurrency hint disables locking in both the scheduler and // reactor I/O. This hint has the following restrictions: // // - Care must be taken to ensure that all operations on the io_context and any // of its associated I/O objects (such as sockets and timers) occur in only // one thread at a time. // // - Asynchronous resolve operations fail with operation_not_supported. // // - If a signal_set is used with the io_context, signal_set objects cannot be // used with any other io_context in the program. #define ASIO_CONCURRENCY_HINT_UNSAFE \ static_cast(ASIO_CONCURRENCY_HINT_ID) // This special concurrency hint disables locking in the reactor I/O. This hint // has the following restrictions: // // - Care must be taken to ensure that run functions on the io_context, and all // operations on the io_context's associated I/O objects (such as sockets and // timers), occur in only one thread at a time. 
#define ASIO_CONCURRENCY_HINT_UNSAFE_IO \ static_cast(ASIO_CONCURRENCY_HINT_ID \ | ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \ | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION) // The special concurrency hint provides full thread safety. #define ASIO_CONCURRENCY_HINT_SAFE \ static_cast(ASIO_CONCURRENCY_HINT_ID \ | ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \ | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION \ | ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO) // This #define may be overridden at compile time to specify a program-wide // default concurrency hint, used by the zero-argument io_context constructor. #if !defined(ASIO_CONCURRENCY_HINT_DEFAULT) # define ASIO_CONCURRENCY_HINT_DEFAULT -1 #endif // !defined(ASIO_CONCURRENCY_HINT_DEFAULT) // This #define may be overridden at compile time to specify a program-wide // concurrency hint, used by the one-argument io_context constructor when // passed a value of 1. #if !defined(ASIO_CONCURRENCY_HINT_1) # define ASIO_CONCURRENCY_HINT_1 1 #endif // !defined(ASIO_CONCURRENCY_HINT_DEFAULT) #endif // ASIO_DETAIL_CONCURRENCY_HINT_HPP galera-4-26.4.25/asio/asio/detail/reactive_descriptor_service.hpp000644 000164 177776 00000031603 15107057155 026140 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_descriptor_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) #include "asio/buffer.hpp" #include "asio/execution_context.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/descriptor_read_op.hpp" #include "asio/detail/descriptor_write_op.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_wait_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/posix/descriptor_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_descriptor_service : public execution_context_service_base { public: // The native type of a descriptor. typedef int native_handle_type; // The implementation type of the descriptor. class implementation_type : private asio::detail::noncopyable { public: // Default constructor. implementation_type() : descriptor_(-1), state_(0) { } private: // Only this service will have access to the internal values. friend class reactive_descriptor_service; // The native descriptor representation. int descriptor_; // The current state of the descriptor. descriptor_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_descriptor_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new descriptor implementation. 
ASIO_DECL void construct(implementation_type& impl); // Move-construct a new descriptor implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another descriptor implementation. ASIO_DECL void move_assign(implementation_type& impl, reactive_descriptor_service& other_service, implementation_type& other_impl); // Destroy a descriptor implementation. ASIO_DECL void destroy(implementation_type& impl); // Assign a native descriptor to a descriptor implementation. ASIO_DECL asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec); // Determine whether the descriptor is open. bool is_open(const implementation_type& impl) const { return impl.descriptor_ != -1; } // Destroy a descriptor implementation. ASIO_DECL asio::error_code close(implementation_type& impl, asio::error_code& ec); // Get the native descriptor representation. native_handle_type native_handle(const implementation_type& impl) const { return impl.descriptor_; } // Release ownership of the native descriptor representation. ASIO_DECL native_handle_type release(implementation_type& impl); // Cancel all operations associated with the descriptor. ASIO_DECL asio::error_code cancel(implementation_type& impl, asio::error_code& ec); // Perform an IO control command on the descriptor. template asio::error_code io_control(implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { descriptor_ops::ioctl(impl.descriptor_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the descriptor. bool non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the descriptor. 
asio::error_code non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_user_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native descriptor implementation. bool native_non_blocking(const implementation_type& impl) const { return (impl.state_ & descriptor_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native descriptor implementation. asio::error_code native_non_blocking(implementation_type& impl, bool mode, asio::error_code& ec) { descriptor_ops::set_internal_non_blocking( impl.descriptor_, impl.state_, mode, ec); return ec; } // Wait for the descriptor to become ready to read, ready to write, or to have // pending error conditions. asio::error_code wait(implementation_type& impl, posix::descriptor_base::wait_type w, asio::error_code& ec) { switch (w) { case posix::descriptor_base::wait_read: descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec); break; case posix::descriptor_base::wait_write: descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec); break; case posix::descriptor_base::wait_error: descriptor_ops::poll_error(impl.descriptor_, impl.state_, ec); break; default: ec = asio::error::invalid_argument; break; } return ec; } // Asynchronously wait for the descriptor to become ready to read, ready to // write, or to have pending error conditions. template void async_wait(implementation_type& impl, posix::descriptor_base::wait_type w, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_wait_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "descriptor", &impl, impl.descriptor_, "async_wait")); int op_type; switch (w) { case posix::descriptor_base::wait_read: op_type = reactor::read_op; break; case posix::descriptor_base::wait_write: op_type = reactor::write_op; break; case posix::descriptor_base::wait_error: op_type = reactor::except_op; break; default: p.p->ec_ = asio::error::invalid_argument; reactor_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; return; } start_op(impl, op_type, p.p, is_continuation, false, false); p.v = p.p = 0; } // Write some data to the descriptor. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_write(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be written without blocking. size_t write_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_write(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous write. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef descriptor_write_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "descriptor", &impl, impl.descriptor_, "async_write_some")); start_op(impl, reactor::write_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Start an asynchronous wait until data can be written without blocking. template void async_write_some(implementation_type& impl, const null_buffers&, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "descriptor", &impl, impl.descriptor_, "async_write_some(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Read some data from the stream. Returns the number of bytes read. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return descriptor_ops::sync_read(impl.descriptor_, impl.state_, bufs.buffers(), bufs.count(), bufs.all_empty(), ec); } // Wait until data can be read without blocking. size_t read_some(implementation_type& impl, const null_buffers&, asio::error_code& ec) { // Wait for descriptor to become ready. descriptor_ops::poll_read(impl.descriptor_, impl.state_, ec); return 0; } // Start an asynchronous read. The buffer for the data being read must be // valid for the lifetime of the asynchronous operation. 
template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef descriptor_read_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.descriptor_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "descriptor", &impl, impl.descriptor_, "async_read_some")); start_op(impl, reactor::read_op, p.p, is_continuation, true, buffer_sequence_adapter::all_empty(buffers)); p.v = p.p = 0; } // Wait until data can be read without blocking. template void async_read_some(implementation_type& impl, const null_buffers&, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "descriptor", &impl, impl.descriptor_, "async_read_some(null_buffers)")); start_op(impl, reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } private: // Start the asynchronous operation. ASIO_DECL void start_op(implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // The selector that performs event demultiplexing for the service. 
reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_descriptor_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // ASIO_DETAIL_REACTIVE_DESCRIPTOR_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_handle_write_op.hpp000644 000164 177776 00000006545 15107057155 025421 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_handle_write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #define ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_handle_write_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_handle_write_op); win_iocp_handle_write_op(const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_handle_write_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, 
io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_handle_write_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (owner) { // Check whether buffers are still valid. buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_HANDLE_WRITE_OP_HPP galera-4-26.4.25/asio/asio/detail/socket_select_interrupter.hpp000644 000164 177776 00000004625 15107057155 025656 0ustar00jenkinsnogroup000000 000000 // // detail/socket_select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(__SYMBIAN32__) #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class socket_select_interrupter { public: // Constructor. ASIO_DECL socket_select_interrupter(); // Destructor. ASIO_DECL ~socket_select_interrupter(); // Recreate the interrupter's descriptors. Used after a fork. ASIO_DECL void recreate(); // Interrupt the select call. ASIO_DECL void interrupt(); // Reset the select interrupt. Returns true if the call was interrupted. ASIO_DECL bool reset(); // Get the read descriptor to be passed to select. socket_type read_descriptor() const { return read_descriptor_; } private: // Open the descriptors. Throws on error. ASIO_DECL void open_descriptors(); // Close the descriptors. ASIO_DECL void close_descriptors(); // The read end of a connection used to interrupt the select call. This file // descriptor is passed to select such that when it is time to stop, a single // byte will be written on the other end of the connection and this // descriptor will become readable. socket_type read_descriptor_; // The write end of a connection used to interrupt the select call. A single // byte may be written to this to wake up the select which is waiting for the // other end to become readable. 
socket_type write_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/socket_select_interrupter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) // || defined(__CYGWIN__) // || defined(__SYMBIAN32__) #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SOCKET_SELECT_INTERRUPTER_HPP galera-4-26.4.25/asio/asio/detail/winrt_socket_connect_op.hpp000644 000164 177776 00000005553 15107057155 025307 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_connect_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_connect_op); winrt_socket_connect_op(Handler& handler, const IoExecutor& io_ex) : winrt_async_op(&winrt_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const 
asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_connect_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_CONNECT_OP_HPP galera-4-26.4.25/asio/asio/detail/winapp_thread.hpp000644 000164 177776 00000005034 15107057155 023204 0ustar00jenkinsnogroup000000 000000 // // detail/winapp_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINAPP_THREAD_HPP #define ASIO_DETAIL_WINAPP_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && defined(ASIO_WINDOWS_APP) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD WINAPI winapp_thread_function(LPVOID arg); class winapp_thread : private noncopyable { public: // Constructor. template winapp_thread(Function f, unsigned int = 0) { scoped_ptr arg(new func(f)); DWORD thread_id = 0; thread_ = ::CreateThread(0, 0, winapp_thread_function, arg.get(), 0, &thread_id); if (!thread_) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } arg.release(); } // Destructor. ~winapp_thread() { ::CloseHandle(thread_); } // Wait for the thread to exit. void join() { ::WaitForSingleObjectEx(thread_, INFINITE, false); } // Get number of CPUs. 
static std::size_t hardware_concurrency() { SYSTEM_INFO system_info; ::GetNativeSystemInfo(&system_info); return system_info.dwNumberOfProcessors; } private: friend DWORD WINAPI winapp_thread_function(LPVOID arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ::HANDLE thread_; }; inline DWORD WINAPI winapp_thread_function(LPVOID arg) { scoped_ptr func( static_cast(arg)); func->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && defined(ASIO_WINDOWS_APP) #endif // ASIO_DETAIL_WINAPP_THREAD_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_connect_op.hpp000644 000164 177776 00000006720 15107057155 025743 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_connect_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_connect_op_base : public reactor_op { public: reactive_socket_connect_op_base(socket_type socket, func_type complete_func) : reactor_op(&reactive_socket_connect_op_base::do_perform, complete_func), socket_(socket) { } static status do_perform(reactor_op* base) { reactive_socket_connect_op_base* o( static_cast(base)); status result = socket_ops::non_blocking_connect( o->socket_, o->ec_) ? done : not_done; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_connect", o->ec_)); return result; } private: socket_type socket_; }; template class reactive_socket_connect_op : public reactive_socket_connect_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_connect_op); reactive_socket_connect_op(socket_type socket, Handler& handler, const IoExecutor& io_ex) : reactive_socket_connect_op_base(socket, &reactive_socket_connect_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. 
reactive_socket_connect_op* o (static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder1 handler(o->handler_, o->ec_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_CONNECT_OP_HPP galera-4-26.4.25/asio/asio/detail/reactive_serial_port_service.hpp000644 000164 177776 00000017311 15107057155 026305 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/serial_port_base.hpp" #include "asio/detail/descriptor_ops.hpp" #include "asio/detail/reactive_descriptor_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend reactive_descriptor_service to provide serial port support. class reactive_serial_port_service : public execution_context_service_base { public: // The native type of a serial port. typedef reactive_descriptor_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef reactive_descriptor_service::implementation_type implementation_type; ASIO_DECL reactive_serial_port_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new serial port implementation. void construct(implementation_type& impl) { descriptor_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { descriptor_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, reactive_serial_port_service& other_service, implementation_type& other_impl) { descriptor_service_.move_assign(impl, other_service.descriptor_service_, other_impl); } // Destroy a serial port implementation. 
void destroy(implementation_type& impl) { descriptor_service_.destroy(impl); } // Open the serial port using the specified device name. ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native descriptor to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& native_descriptor, asio::error_code& ec) { return descriptor_service_.assign(impl, native_descriptor, ec); } // Determine whether the serial port is open. bool is_open(const implementation_type& impl) const { return descriptor_service_.is_open(impl); } // Destroy a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return descriptor_service_.native_handle(impl); } // Cancel all operations associated with the serial port. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return descriptor_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &reactive_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &reactive_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type& impl, asio::error_code& ec) { errno = 0; descriptor_ops::error_wrapper(::tcsendbreak( descriptor_service_.native_handle(impl), 0), ec); return ec; } // Write the given data. Returns the number of bytes sent. 
template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { descriptor_service_.async_write_some(impl, buffers, handler, io_ex); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return descriptor_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { descriptor_service_.async_read_some(impl, buffers, handler, io_ex); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, termios&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, termios& storage, asio::error_code& ec) { static_cast(option)->store(storage, ec); return ec; } // Helper function to set a serial port option. ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const termios&, asio::error_code&); // Helper function template to load a serial port option. 
template static asio::error_code load_option(void* option, const termios& storage, asio::error_code& ec) { static_cast(option)->load(storage, ec); return ec; } // Helper function to get a serial port option. ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. reactive_descriptor_service descriptor_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_REACTIVE_SERIAL_PORT_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/array.hpp000644 000164 177776 00000001606 15107057155 021476 0ustar00jenkinsnogroup000000 000000 // // detail/array.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ARRAY_HPP #define ASIO_DETAIL_ARRAY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ARRAY) # include #else // defined(ASIO_HAS_STD_ARRAY) # include #endif // defined(ASIO_HAS_STD_ARRAY) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_ARRAY) using std::array; #else // defined(ASIO_HAS_STD_ARRAY) using boost::array; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_ARRAY_HPP galera-4-26.4.25/asio/asio/detail/timer_queue_base.hpp000644 000164 177776 00000003175 15107057155 023701 0ustar00jenkinsnogroup000000 000000 // // detail/timer_queue_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #define ASIO_DETAIL_TIMER_QUEUE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class timer_queue_base : private noncopyable { public: // Constructor. timer_queue_base() : next_(0) {} // Destructor. virtual ~timer_queue_base() {} // Whether there are no timers in the queue. virtual bool empty() const = 0; // Get the time to wait until the next timer. virtual long wait_duration_msec(long max_duration) const = 0; // Get the time to wait until the next timer. 
virtual long wait_duration_usec(long max_duration) const = 0; // Dequeue all ready timers. virtual void get_ready_timers(op_queue& ops) = 0; // Dequeue all timers. virtual void get_all_timers(op_queue& ops) = 0; private: friend class timer_queue_set; // Next timer queue in the set. timer_queue_base* next_; }; template class timer_queue; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_BASE_HPP galera-4-26.4.25/asio/asio/detail/winrt_socket_recv_op.hpp000644 000164 177776 00000007157 15107057155 024617 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_recv_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_recv_op); winrt_socket_recv_op(const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : winrt_async_op( &winrt_socket_recv_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, 
io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. winrt_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) std::size_t bytes_transferred = o->result_ ? o->result_->Length : 0; if (bytes_transferred == 0 && !o->ec_ && !buffer_sequence_adapter::all_empty(o->buffers_)) { o->ec_ = asio::error::eof; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: MutableBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_RECV_OP_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_send_op.hpp000644 000164 177776 00000007057 15107057155 025254 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_send_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_send_op); win_iocp_socket_send_op(socket_ops::weak_cancel_token_type cancel_token, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_send_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_send(o->cancel_token_, ec); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; ConstBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SEND_OP_HPP galera-4-26.4.25/asio/asio/detail/is_executor.hpp000644 000164 177776 00000005610 15107057155 022710 0ustar00jenkinsnogroup000000 000000 // // detail/is_executor.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IS_EXECUTOR_HPP #define ASIO_DETAIL_IS_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct executor_memfns_base { void context(); void on_work_started(); void on_work_finished(); void dispatch(); void post(); void defer(); }; template struct executor_memfns_derived : T, executor_memfns_base { }; template struct executor_memfns_check { }; template char (&context_memfn_helper(...))[2]; template char context_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::context>*); template char (&on_work_started_memfn_helper(...))[2]; template char on_work_started_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::on_work_started>*); template char (&on_work_finished_memfn_helper(...))[2]; template char on_work_finished_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::on_work_finished>*); template char (&dispatch_memfn_helper(...))[2]; template char dispatch_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::dispatch>*); template char (&post_memfn_helper(...))[2]; template char post_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::post>*); template char (&defer_memfn_helper(...))[2]; template char defer_memfn_helper( executor_memfns_check< void (executor_memfns_base::*)(), &executor_memfns_derived::defer>*); template struct is_executor_class : integral_constant(0)) != 1 && sizeof(on_work_started_memfn_helper(0)) != 1 && sizeof(on_work_finished_memfn_helper(0)) != 1 && sizeof(dispatch_memfn_helper(0)) != 1 && 
sizeof(post_memfn_helper(0)) != 1 && sizeof(defer_memfn_helper(0)) != 1> { }; template struct is_executor : conditional::value, is_executor_class, false_type>::type { }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IS_EXECUTOR_HPP galera-4-26.4.25/asio/asio/detail/solaris_fenced_block.hpp000644 000164 177776 00000002351 15107057155 024510 0ustar00jenkinsnogroup000000 000000 // // detail/solaris_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #define ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__sun) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class solaris_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit solaris_fenced_block(half_t) { } // Constructor for a full fenced block. explicit solaris_fenced_block(full_t) { membar_consumer(); } // Destructor. ~solaris_fenced_block() { membar_producer(); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__sun) #endif // ASIO_DETAIL_SOLARIS_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/fd_set_adapter.hpp000644 000164 177776 00000001713 15107057155 023323 0ustar00jenkinsnogroup000000 000000 // // detail/fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/posix_fd_set_adapter.hpp" #include "asio/detail/win_fd_set_adapter.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef win_fd_set_adapter fd_set_adapter; #else typedef posix_fd_set_adapter fd_set_adapter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_FD_SET_ADAPTER_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_service_base.hpp000644 000164 177776 00000054175 15107057155 026262 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/socket_base.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_reactor.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/win_iocp_io_context.hpp" #include "asio/detail/win_iocp_null_buffers_op.hpp" #include "asio/detail/win_iocp_socket_connect_op.hpp" #include "asio/detail/win_iocp_socket_send_op.hpp" #include "asio/detail/win_iocp_socket_recv_op.hpp" #include "asio/detail/win_iocp_socket_recvmsg_op.hpp" #include "asio/detail/win_iocp_wait_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_socket_service_base { public: // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // We use a shared pointer as a cancellation token here to work around the // broken Windows support for cancellation. MSDN says that when you call // closesocket any outstanding WSARecv or WSASend operations will complete // with the error ERROR_OPERATION_ABORTED. 
In practice they complete with // ERROR_NETNAME_DELETED, which means you can't tell the difference between // a local cancellation and the socket being hard-closed by the peer. socket_ops::shared_cancel_token_type cancel_token_; // Per-descriptor data used by the reactor. select_reactor::per_descriptor_data reactor_data_; #if defined(ASIO_ENABLE_CANCELIO) // The ID of the thread from which it is safe to cancel asynchronous // operations. 0 means no asynchronous operations have been started yet. // ~0 means asynchronous operations have been started from more than one // thread, and cancellation is not supported for the socket. DWORD safe_cancellation_thread_id_; #endif // defined(ASIO_ENABLE_CANCELIO) // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL win_iocp_socket_service_base(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void base_shutdown(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, win_iocp_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Release ownership of the socket. 
ASIO_DECL socket_type release( base_implementation_type& impl, asio::error_code& ec); // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. 
asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Wait for the socket to become ready to read, ready to write, or to have // pending error conditions. asio::error_code wait(base_implementation_type& impl, socket_base::wait_type w, asio::error_code& ec) { switch (w) { case socket_base::wait_read: socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); break; case socket_base::wait_write: socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); break; case socket_base::wait_error: socket_ops::poll_error(impl.socket_, impl.state_, -1, ec); break; default: ec = asio::error::invalid_argument; break; } return ec; } // Asynchronously wait for the socket to become ready to read, ready to // write, or to have pending error conditions. template void async_wait(base_implementation_type& impl, socket_base::wait_type w, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef win_iocp_wait_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_wait")); switch (w) { case socket_base::wait_read: start_null_buffers_receive_op(impl, 0, p.p); break; case socket_base::wait_write: start_reactor_op(impl, select_reactor::write_op, p.p); break; case socket_base::wait_error: start_reactor_op(impl, select_reactor::except_op, p.p); break; default: p.p->ec_ = asio::error::invalid_argument; iocp_service_.post_immediate_completion(p.p, is_continuation); break; } p.v = p.p = 0; } // Send the given data to the peer. Returns the number of bytes sent. 
template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_send_op< ConstBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_send")); buffer_sequence_adapter bufs(buffers); start_send_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_send(null_buffers)")); start_reactor_op(impl, select_reactor::write_op, p.p); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_socket_recv_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.state_, impl.cancel_token_, buffers, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), flags, (impl.state_ & socket_ops::stream_oriented) != 0 && bufs.all_empty(), p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. 
template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive(null_buffers)")); start_null_buffers_receive_op(impl, flags, p.p); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. 
typedef win_iocp_socket_recvmsg_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, buffers, out_flags, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive_with_flags")); buffer_sequence_adapter bufs(buffers); start_receive_op(impl, bufs.buffers(), bufs.count(), in_flags, false, p.p); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) { // Allocate and construct an operation to wrap the handler. typedef win_iocp_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.cancel_token_, handler, io_ex); ASIO_HANDLER_CREATION((context_, *p.p, "socket", &impl, impl.socket_, "async_receive_with_flags(null_buffers)")); // Reset out_flags since it can be given no sensible value at this time. out_flags = 0; start_null_buffers_receive_op(impl, in_flags, p.p); p.v = p.p = 0; } // Helper function to restart an asynchronous accept operation. ASIO_DECL void restart_accept_op(socket_type s, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); protected: // Open a new socket implementation. ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int family, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, socket_type native_socket, asio::error_code& ec); // Helper function to start an asynchronous send operation. 
ASIO_DECL void start_send_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous send_to operation. ASIO_DECL void start_send_to_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, const socket_addr_type* addr, int addrlen, socket_base::message_flags flags, operation* op); // Helper function to start an asynchronous receive operation. ASIO_DECL void start_receive_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_base::message_flags flags, bool noop, operation* op); // Helper function to start an asynchronous null_buffers receive operation. ASIO_DECL void start_null_buffers_receive_op( base_implementation_type& impl, socket_base::message_flags flags, reactor_op* op); // Helper function to start an asynchronous receive_from operation. ASIO_DECL void start_receive_from_op(base_implementation_type& impl, WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr, socket_base::message_flags flags, int* addrlen, operation* op); // Helper function to start an asynchronous accept operation. ASIO_DECL void start_accept_op(base_implementation_type& impl, bool peer_is_open, socket_holder& new_socket, int family, int type, int protocol, void* output_buffer, DWORD address_length, operation* op); // Start an asynchronous read or write operation using the reactor. ASIO_DECL void start_reactor_op(base_implementation_type& impl, int op_type, reactor_op* op); // Start the asynchronous connect operation using the reactor. ASIO_DECL void start_connect_op(base_implementation_type& impl, int family, int type, const socket_addr_type* remote_addr, std::size_t remote_addrlen, win_iocp_socket_connect_op_base* op); // Helper function to close a socket when the associated object is being // destroyed. 
ASIO_DECL void close_for_destruction(base_implementation_type& impl); // Update the ID of the thread from which cancellation is safe. ASIO_DECL void update_cancellation_thread_id( base_implementation_type& impl); // Helper function to get the reactor. If no reactor has been created yet, a // new one is obtained from the execution context and a pointer to it is // cached in this service. ASIO_DECL select_reactor& get_reactor(); // The type of a ConnectEx function pointer, as old SDKs may not provide it. typedef BOOL (PASCAL *connect_ex_fn)(SOCKET, const socket_addr_type*, int, void*, DWORD, DWORD*, OVERLAPPED*); // Helper function to get the ConnectEx pointer. If no ConnectEx pointer has // been obtained yet, one is obtained using WSAIoctl and the pointer is // cached. Returns a null pointer if ConnectEx is not available. ASIO_DECL connect_ex_fn get_connect_ex( base_implementation_type& impl, int type); // The type of a NtSetInformationFile function pointer. typedef LONG (NTAPI *nt_set_info_fn)(HANDLE, ULONG_PTR*, void*, ULONG, ULONG); // Helper function to get the NtSetInformationFile function pointer. If no // NtSetInformationFile pointer has been obtained yet, one is obtained using // GetProcAddress and the pointer is cached. Returns a null pointer if // NtSetInformationFile is not available. ASIO_DECL nt_set_info_fn get_nt_set_info(); // Helper function to emulate InterlockedCompareExchangePointer functionality // for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. ASIO_DECL void* interlocked_compare_exchange_pointer( void** dest, void* exch, void* cmp); // Helper function to emulate InterlockedExchangePointer functionality for: // - very old Platform SDKs; and // - platform SDKs where MSVC's /Wp64 option causes spurious warnings. ASIO_DECL void* interlocked_exchange_pointer(void** dest, void* val); // The execution context used to obtain the reactor, if required. 
execution_context& context_; // The IOCP service used for running asynchronous operations and dispatching // handlers. win_iocp_io_context& iocp_service_; // The reactor used for performing connect operations. This object is created // only if needed. select_reactor* reactor_; // Pointer to ConnectEx implementation. void* connect_ex_; // Pointer to NtSetInformationFile implementation. void* nt_set_info_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_SERVICE_BASE_HPP galera-4-26.4.25/asio/asio/detail/limits.hpp000644 000164 177776 00000001264 15107057155 021661 0ustar00jenkinsnogroup000000 000000 // // detail/limits.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_LIMITS_HPP #define ASIO_DETAIL_LIMITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_LIMITS) # include #else // defined(ASIO_HAS_BOOST_LIMITS) # include #endif // defined(ASIO_HAS_BOOST_LIMITS) #endif // ASIO_DETAIL_LIMITS_HPP galera-4-26.4.25/asio/asio/detail/winrt_resolve_op.hpp000644 000164 177776 00000007204 15107057155 023760 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_resolve_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #define ASIO_DETAIL_WINRT_RESOLVE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolve_op : public winrt_async_op< Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^> { public: ASIO_DEFINE_HANDLER_PTR(winrt_resolve_op); typedef typename Protocol::endpoint endpoint_type; typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_results results_type; winrt_resolve_op(const query_type& query, Handler& handler, const IoExecutor& io_ex) : winrt_async_op< Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^>( &winrt_resolve_op::do_complete), query_(query), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. 
winrt_resolve_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); results_type results = results_type(); if (!o->ec_) { try { results = results_type::create(o->result_, o->query_.hints(), o->query_.host_name(), o->query_.service_name()); } catch (Platform::Exception^ e) { o->ec_ = asio::error_code(e->HResult, asio::system_category()); } } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, results); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: query_type query_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVE_OP_HPP galera-4-26.4.25/asio/asio/detail/posix_tss_ptr.hpp000644 000164 177776 00000003223 15107057155 023275 0ustar00jenkinsnogroup000000 000000 // // detail/posix_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_TSS_PTR_HPP #define ASIO_DETAIL_POSIX_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL void posix_tss_ptr_create(pthread_key_t& key); template class posix_tss_ptr : private noncopyable { public: // Constructor. posix_tss_ptr() { posix_tss_ptr_create(tss_key_); } // Destructor. ~posix_tss_ptr() { ::pthread_key_delete(tss_key_); } // Get the value. operator T*() const { return static_cast(::pthread_getspecific(tss_key_)); } // Set the value. void operator=(T* value) { ::pthread_setspecific(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. pthread_key_t tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_TSS_PTR_HPP galera-4-26.4.25/asio/asio/detail/non_const_lvalue.hpp000644 000164 177776 00000002577 15107057155 023740 0ustar00jenkinsnogroup000000 000000 // // detail/non_const_lvalue.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NON_CONST_LVALUE_HPP #define ASIO_DETAIL_NON_CONST_LVALUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct non_const_lvalue { #if defined(ASIO_HAS_MOVE) explicit non_const_lvalue(T& t) : value(static_cast::type>::value, typename decay::type&, T&&>::type>(t)) { } typename conditional::type>::value, typename decay::type&, typename decay::type>::type value; #else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) explicit non_const_lvalue(const typename decay::type& t) : value(t) { } typename decay::type value; #endif // defined(ASIO_HAS_MOVE) }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NON_CONST_LVALUE_HPP galera-4-26.4.25/asio/asio/detail/null_mutex.hpp000644 000164 177776 00000002175 15107057155 022556 0ustar00jenkinsnogroup000000 000000 // // detail/null_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_MUTEX_HPP #define ASIO_DETAIL_NULL_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. null_mutex() { } // Destructor. 
~null_mutex() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/future.hpp000644 000164 177776 00000002227 15107057155 021672 0ustar00jenkinsnogroup000000 000000 // // detail/future.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FUTURE_HPP #define ASIO_DETAIL_FUTURE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_FUTURE) # include // Even though the future header is available, libstdc++ may not implement the // std::future class itself. However, we need to have already included the // future header to reliably test for _GLIBCXX_HAS_GTHREADS. # if defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX) # if defined(_GLIBCXX_HAS_GTHREADS) # define ASIO_HAS_STD_FUTURE_CLASS 1 # endif // defined(_GLIBCXX_HAS_GTHREADS) # else // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_FUTURE_CLASS 1 # endif // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX) #endif // defined(ASIO_HAS_STD_FUTURE) #endif // ASIO_DETAIL_FUTURE_HPP galera-4-26.4.25/asio/asio/detail/epoll_reactor.hpp000644 000164 177776 00000021563 15107057155 023216 0ustar00jenkinsnogroup000000 000000 // // detail/epoll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EPOLL_REACTOR_HPP #define ASIO_DETAIL_EPOLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_EPOLL) #include "asio/detail/atomic_count.hpp" #include "asio/detail/conditionally_enabled_mutex.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_TIMERFD) # include #endif // defined(ASIO_HAS_TIMERFD) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class epoll_reactor : public execution_context_service_base { private: // The mutex type used by this reactor. typedef conditionally_enabled_mutex mutex; public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor queues. class descriptor_state : operation { friend class epoll_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; epoll_reactor* reactor_; int descriptor_; uint32_t registered_events_; op_queue op_queue_[max_ops]; bool try_speculative_[max_ops]; bool shutdown_; ASIO_DECL descriptor_state(bool locking); void set_ready_events(uint32_t events) { task_result_ = events; } void add_ready_events(uint32_t events) { task_result_ |= events; } ASIO_DECL operation* perform_io(uint32_t events); ASIO_DECL static void do_complete( void* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred); }; // Per-descriptor data. 
typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL epoll_reactor(asio::execution_context& ctx); // Destructor. ASIO_DECL ~epoll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Recreate internal descriptors following a fork. ASIO_DECL void notify_fork( asio::execution_context::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { scheduler_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. 
The reactor resources associated with // the descriptor must be released by calling cleanup_descriptor_data. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remove the descriptor's registration from the reactor. The reactor // resources associated with the descriptor must be released by calling // cleanup_descriptor_data. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Perform any post-deregistration cleanup tasks associated with the // descriptor data. ASIO_DECL void cleanup_descriptor_data( per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source); // Run epoll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(long usec, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // The hint to pass to epoll_create to size its data structures. enum { epoll_size = 20000 }; // Create the epoll file descriptor. Throws an exception if the descriptor // cannot be created. 
ASIO_DECL static int do_epoll_create(); // Create the timerfd file descriptor. Does not throw. ASIO_DECL static int do_timerfd_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Get the timeout value for the epoll_wait call. The timeout value is // returned as a number of milliseconds. A return value of -1 indicates // that epoll_wait should block indefinitely. ASIO_DECL int get_timeout(int msec); #if defined(ASIO_HAS_TIMERFD) // Get the timeout value for the timer descriptor. The return value is the // flag argument to be used when calling timerfd_settime. ASIO_DECL int get_timeout(itimerspec& ts); #endif // defined(ASIO_HAS_TIMERFD) // The scheduler implementation used to post completions. scheduler& scheduler_; // Mutex to protect access to internal data. mutex mutex_; // The interrupter is used to break a blocking epoll_wait call. select_interrupter interrupter_; // The epoll file descriptor. int epoll_fd_; // The timer file descriptor. int timer_fd_; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. object_pool registered_descriptors_; // Helper class to do post-perform_io cleanup. 
struct perform_io_cleanup_on_block_exit; friend struct perform_io_cleanup_on_block_exit; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/epoll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/epoll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_EPOLL) #endif // ASIO_DETAIL_EPOLL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/std_fenced_block.hpp000644 000164 177776 00000002443 15107057155 023630 0ustar00jenkinsnogroup000000 000000 // // detail/std_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_FENCED_BLOCK_HPP #define ASIO_DETAIL_STD_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_ATOMIC) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit std_fenced_block(half_t) { } // Constructor for a full fenced block. explicit std_fenced_block(full_t) { std::atomic_thread_fence(std::memory_order_acquire); } // Destructor. 
~std_fenced_block() { std::atomic_thread_fence(std::memory_order_release); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_ATOMIC) #endif // ASIO_DETAIL_STD_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/winrt_socket_send_op.hpp000644 000164 177776 00000006420 15107057155 024601 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/winrt_async_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_socket_send_op : public winrt_async_op { public: ASIO_DEFINE_HANDLER_PTR(winrt_socket_send_op); winrt_socket_send_op(const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : winrt_async_op(&winrt_socket_send_op::do_complete), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code&, std::size_t) { // Take ownership of the operation object. 
winrt_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->result_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: ConstBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SOCKET_SEND_OP_HPP galera-4-26.4.25/asio/asio/detail/executor_op.hpp000644 000164 177776 00000004621 15107057155 022714 0ustar00jenkinsnogroup000000 000000 // // detail/executor_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_EXECUTOR_OP_HPP #define ASIO_DETAIL_EXECUTOR_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/scheduler_operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class executor_op : public Operation { public: ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(executor_op); template executor_op(ASIO_MOVE_ARG(H) h, const Alloc& allocator) : Operation(&executor_op::do_complete), handler_(ASIO_MOVE_CAST(H)(h)), allocator_(allocator) { } static void do_complete(void* owner, Operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. executor_op* o(static_cast(base)); Alloc allocator(o->allocator_); ptr p = { detail::addressof(allocator), o, o }; ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. Handler handler(ASIO_MOVE_CAST(Handler)(o->handler_)); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN(()); asio_handler_invoke_helpers::invoke(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; Alloc allocator_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_EXECUTOR_OP_HPP galera-4-26.4.25/asio/asio/detail/dev_poll_reactor.hpp000644 000164 177776 00000017024 15107057155 023704 0ustar00jenkinsnogroup000000 000000 // // detail/dev_poll_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DEV_POLL_REACTOR_HPP #define ASIO_DETAIL_DEV_POLL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_DEV_POLL) #include #include #include #include "asio/detail/hash_map.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class dev_poll_reactor : public execution_context_service_base { public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL dev_poll_reactor(asio::execution_context& ctx); // Destructor. 
ASIO_DECL ~dev_poll_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Recreate internal descriptors following a fork. ASIO_DECL void notify_fork( asio::execution_context::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { scheduler_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. The reactor resources associated with // the descriptor must be released by calling cleanup_descriptor_data. 
ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Remove the descriptor's registration from the reactor. The reactor // resources associated with the descriptor must be released by calling // cleanup_descriptor_data. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data&); // Perform any post-deregistration cleanup tasks associated with the // descriptor data. ASIO_DECL void cleanup_descriptor_data(per_descriptor_data&); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source); // Run /dev/poll once until interrupted or events are ready to be dispatched. ASIO_DECL void run(long usec, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: // Create the /dev/poll file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_dev_poll_create(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. 
ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the /dev/poll DP_POLL operation. The timeout // value is returned as a number of milliseconds. A return value of -1 // indicates that the poll should block indefinitely. ASIO_DECL int get_timeout(int msec); // Cancel all operations associated with the given descriptor. The do_cancel // function of the handler objects will be invoked. This function does not // acquire the dev_poll_reactor's mutex. ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // Add a pending event entry for the given descriptor. ASIO_DECL ::pollfd& add_pending_event_change(int descriptor); // The scheduler implementation used to post completions. scheduler& scheduler_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The /dev/poll file descriptor. int dev_poll_fd_; // Vector of /dev/poll events waiting to be written to the descriptor. std::vector< ::pollfd> pending_event_changes_; // Hash map to associate a descriptor with a pending event change index. hash_map pending_event_change_index_; // The interrupter is used to break a blocking DP_POLL operation. select_interrupter interrupter_; // The queues of read, write and except operations. reactor_op_queue op_queue_[max_ops]; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/dev_poll_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/dev_poll_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_DEV_POLL) #endif // ASIO_DETAIL_DEV_POLL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/assert.hpp000644 000164 177776 00000001600 15107057155 021653 0ustar00jenkinsnogroup000000 000000 // // detail/assert.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_ASSERT_HPP #define ASIO_DETAIL_ASSERT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_ASSERT) # include #else // defined(ASIO_HAS_BOOST_ASSERT) # include #endif // defined(ASIO_HAS_BOOST_ASSERT) #if defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) BOOST_ASSERT(expr) #else // defined(ASIO_HAS_BOOST_ASSERT) # define ASIO_ASSERT(expr) assert(expr) #endif // defined(ASIO_HAS_BOOST_ASSERT) #endif // ASIO_DETAIL_ASSERT_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_recvmsg_op.hpp000644 000164 177776 00000007425 15107057155 025770 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_recvmsg_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/socket_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvmsg_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvmsg_op); win_iocp_socket_recvmsg_op( socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_recvmsg_op::do_complete), cancel_token_(cancel_token), buffers_(buffers), out_flags_(out_flags), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recvmsg_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvmsg(o->cancel_token_, ec); o->out_flags_ = 0; // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; socket_base::message_flags& out_flags_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVMSG_OP_HPP galera-4-26.4.25/asio/asio/detail/std_mutex.hpp000644 000164 177776 00000002422 15107057155 022371 0ustar00jenkinsnogroup000000 000000 // // detail/std_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_MUTEX_HPP #define ASIO_DETAIL_STD_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event; class std_mutex : private noncopyable { public: typedef asio::detail::scoped_lock scoped_lock; // Constructor. std_mutex() { } // Destructor. ~std_mutex() { } // Lock the mutex. void lock() { mutex_.lock(); } // Unlock the mutex. void unlock() { mutex_.unlock(); } private: friend class std_event; std::mutex mutex_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/thread_group.hpp000644 000164 177776 00000003567 15107057155 023053 0ustar00jenkinsnogroup000000 000000 // // detail/thread_group.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_GROUP_HPP #define ASIO_DETAIL_THREAD_GROUP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/thread.hpp" namespace asio { namespace detail { class thread_group { public: // Constructor initialises an empty thread group. thread_group() : first_(0) { } // Destructor joins any remaining threads in the group. 
~thread_group() { join(); } // Create a new thread in the group. template void create_thread(Function f) { first_ = new item(f, first_); } // Create new threads in the group. template void create_threads(Function f, std::size_t num_threads) { for (std::size_t i = 0; i < num_threads; ++i) create_thread(f); } // Wait for all threads in the group to exit. void join() { while (first_) { first_->thread_.join(); item* tmp = first_; first_ = first_->next_; delete tmp; } } // Test whether the group is empty. bool empty() const { return first_ == 0; } private: // Structure used to track a single thread in the group. struct item { template explicit item(Function f, item* next) : thread_(f), next_(next) { } asio::detail::thread thread_; item* next_; }; // The first thread in the group. item* first_; }; } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THREAD_GROUP_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_recvfrom_op.hpp000644 000164 177776 00000007722 15107057155 026145 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recvfrom_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recvfrom_op); win_iocp_socket_recvfrom_op(Endpoint& endpoint, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_recvfrom_op::do_complete), endpoint_(endpoint), endpoint_size_(static_cast(endpoint.capacity())), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } int& endpoint_size() { return endpoint_size_; } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recvfrom(o->cancel_token_, ec); // Record the size of the endpoint returned by the operation. o->endpoint_.resize(o->endpoint_size_); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Endpoint& endpoint_; int endpoint_size_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECVFROM_OP_HPP galera-4-26.4.25/asio/asio/detail/completion_handler.hpp000644 000164 177776 00000004616 15107057155 024232 0ustar00jenkinsnogroup000000 000000 // // detail/completion_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_COMPLETION_HANDLER_HPP #define ASIO_DETAIL_COMPLETION_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_work.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class completion_handler : public operation { public: ASIO_DEFINE_HANDLER_PTR(completion_handler); completion_handler(Handler& h) : operation(&completion_handler::do_complete), handler_(ASIO_MOVE_CAST(Handler)(h)) { handler_work::start(handler_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. completion_handler* h(static_cast(base)); ptr p = { asio::detail::addressof(h->handler_), h, h }; handler_work w(h->handler_); ASIO_HANDLER_COMPLETION((*h)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. Handler handler(ASIO_MOVE_CAST(Handler)(h->handler_)); p.h = asio::detail::addressof(handler); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN(()); w.complete(handler, handler); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_COMPLETION_HANDLER_HPP galera-4-26.4.25/asio/asio/detail/wince_thread.hpp000644 000164 177776 00000004757 15107057155 023026 0ustar00jenkinsnogroup000000 000000 // // detail/wince_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINCE_THREAD_HPP #define ASIO_DETAIL_WINCE_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && defined(UNDER_CE) #include "asio/detail/noncopyable.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { DWORD WINAPI wince_thread_function(LPVOID arg); class wince_thread : private noncopyable { public: // Constructor. template wince_thread(Function f, unsigned int = 0) { scoped_ptr arg(new func(f)); DWORD thread_id = 0; thread_ = ::CreateThread(0, 0, wince_thread_function, arg.get(), 0, &thread_id); if (!thread_) { DWORD last_error = ::GetLastError(); asio::error_code ec(last_error, asio::error::get_system_category()); asio::detail::throw_error(ec, "thread"); } arg.release(); } // Destructor. ~wince_thread() { ::CloseHandle(thread_); } // Wait for the thread to exit. void join() { ::WaitForSingleObject(thread_, INFINITE); } // Get number of CPUs. 
static std::size_t hardware_concurrency() { SYSTEM_INFO system_info; ::GetSystemInfo(&system_info); return system_info.dwNumberOfProcessors; } private: friend DWORD WINAPI wince_thread_function(LPVOID arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ::HANDLE thread_; }; inline DWORD WINAPI wince_thread_function(LPVOID arg) { scoped_ptr func( static_cast(arg)); func->run(); return 0; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && defined(UNDER_CE) #endif // ASIO_DETAIL_WINCE_THREAD_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_null_buffers_op.hpp000644 000164 177776 00000007274 15107057155 025442 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_null_buffers_op); win_iocp_null_buffers_op(socket_ops::weak_cancel_token_type cancel_token, Handler& handler, const IoExecutor& io_ex) : reactor_op(&win_iocp_null_buffers_op::do_perform, &win_iocp_null_buffers_op::do_complete), cancel_token_(cancel_token), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static status do_perform(reactor_op*) { return done; } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // The reactor may have stored a result in the operation object. if (o->ec_) ec = o->ec_; // Map non-portable errors to their portable counterparts. 
if (ec.value() == ERROR_NETNAME_DELETED) { if (o->cancel_token_.expired()) ec = asio::error::operation_aborted; else ec = asio::error::connection_reset; } else if (ec.value() == ERROR_PORT_UNREACHABLE) { ec = asio::error::connection_refused; } // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::weak_cancel_token_type cancel_token_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_NULL_BUFFERS_OP_HPP galera-4-26.4.25/asio/asio/detail/timer_scheduler_fwd.hpp000644 000164 177776 00000002154 15107057155 024375 0ustar00jenkinsnogroup000000 000000 // // detail/timer_scheduler_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) typedef class winrt_timer_scheduler timer_scheduler; #elif defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context timer_scheduler; #elif defined(ASIO_HAS_EPOLL) typedef class epoll_reactor timer_scheduler; #elif defined(ASIO_HAS_KQUEUE) typedef class kqueue_reactor timer_scheduler; #elif defined(ASIO_HAS_DEV_POLL) typedef class dev_poll_reactor timer_scheduler; #else typedef class select_reactor timer_scheduler; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_TIMER_SCHEDULER_FWD_HPP galera-4-26.4.25/asio/asio/detail/io_object_executor.hpp000644 000164 177776 00000010320 15107057155 024224 0ustar00jenkinsnogroup000000 000000 // // io_object_executor.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP #define ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/type_traits.hpp" #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Wrap the (potentially polymorphic) executor so that we can bypass it when // dispatching on a target executor that has a native I/O implementation. 
template class io_object_executor { public: io_object_executor(const Executor& ex, bool native_implementation) ASIO_NOEXCEPT : executor_(ex), has_native_impl_(native_implementation) { } io_object_executor(const io_object_executor& other) ASIO_NOEXCEPT : executor_(other.executor_), has_native_impl_(other.has_native_impl_) { } template io_object_executor( const io_object_executor& other) ASIO_NOEXCEPT : executor_(other.inner_executor()), has_native_impl_(other.has_native_implementation()) { } #if defined(ASIO_HAS_MOVE) io_object_executor(io_object_executor&& other) ASIO_NOEXCEPT : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)), has_native_impl_(other.has_native_impl_) { } #endif // defined(ASIO_HAS_MOVE) const Executor& inner_executor() const ASIO_NOEXCEPT { return executor_; } bool has_native_implementation() const ASIO_NOEXCEPT { return has_native_impl_; } execution_context& context() const ASIO_NOEXCEPT { return executor_.context(); } void on_work_started() const ASIO_NOEXCEPT { if (is_same::value || has_native_impl_) { // When using a native implementation, work is already counted by the // execution context. } else { executor_.on_work_started(); } } void on_work_finished() const ASIO_NOEXCEPT { if (is_same::value || has_native_impl_) { // When using a native implementation, work is already counted by the // execution context. } else { executor_.on_work_finished(); } } template void dispatch(ASIO_MOVE_ARG(F) f, const A& a) const { if (is_same::value || has_native_impl_) { // When using a native implementation, I/O completion handlers are // already dispatched according to the execution context's executor's // rules. We can call the function directly. 
#if defined(ASIO_HAS_MOVE) if (is_same::type>::value) { asio_handler_invoke_helpers::invoke(f, f); return; } #endif // defined(ASIO_HAS_MOVE) typename decay::type function(ASIO_MOVE_CAST(F)(f)); asio_handler_invoke_helpers::invoke(function, function); } else { executor_.dispatch(ASIO_MOVE_CAST(F)(f), a); } } template void post(ASIO_MOVE_ARG(F) f, const A& a) const { executor_.post(ASIO_MOVE_CAST(F)(f), a); } template void defer(ASIO_MOVE_ARG(F) f, const A& a) const { executor_.defer(ASIO_MOVE_CAST(F)(f), a); } friend bool operator==(const io_object_executor& a, const io_object_executor& b) ASIO_NOEXCEPT { return a.executor_ == b.executor_ && a.has_native_impl_ == b.has_native_impl_; } friend bool operator!=(const io_object_executor& a, const io_object_executor& b) ASIO_NOEXCEPT { return a.executor_ != b.executor_ || a.has_native_impl_ != b.has_native_impl_; } private: Executor executor_; const bool has_native_impl_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_IO_OBJECT_EXECUTOR_HPP galera-4-26.4.25/asio/asio/detail/thread_context.hpp000644 000164 177776 00000002044 15107057155 023370 0ustar00jenkinsnogroup000000 000000 // // detail/thread_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THREAD_CONTEXT_HPP #define ASIO_DETAIL_THREAD_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include #include #include "asio/detail/call_stack.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class thread_info_base; // Base class for things that manage threads (scheduler, win_iocp_io_context). 
class thread_context { public: // Per-thread call stack to track the state of each thread in the context. typedef call_stack thread_call_stack; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_THREAD_CONTEXT_HPP galera-4-26.4.25/asio/asio/detail/pop_options.hpp000644 000164 177776 00000005362 15107057155 022734 0ustar00jenkinsnogroup000000 000000 // // detail/pop_options.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // No header guard #if defined(__COMO__) // Comeau C++ #elif defined(__DMC__) // Digital Mars C++ #elif defined(__INTEL_COMPILER) || defined(__ICL) \ || defined(__ICC) || defined(__ECC) // Intel C++ # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if !defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility pop # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) #elif defined(__clang__) // Clang # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) # if !defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility pop # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // !defined(_WIN32) && !defined(__WIN32__) && !defined(WIN32) #elif defined(__GNUC__) // GNU C++ # if defined(__MINGW32__) || defined(__CYGWIN__) # pragma pack (pop) # endif # if defined(__OBJC__) # if !defined(__APPLE_CC__) || (__APPLE_CC__ <= 1) # if defined(ASIO_OBJC_WORKAROUND) # undef Protocol # undef id # undef ASIO_OBJC_WORKAROUND # endif # endif # endif # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if 
!defined(ASIO_DISABLE_VISIBILITY) # pragma GCC visibility pop # endif // !defined(ASIO_DISABLE_VISIBILITY) # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4) # if (__GNUC__ >= 7) # pragma GCC diagnostic pop # endif // (__GNUC__ >= 7) #elif defined(__KCC) // Kai C++ #elif defined(__sgi) // SGI MIPSpro C++ #elif defined(__DECCXX) // Compaq Tru64 Unix cxx #elif defined(__ghs) // Greenhills C++ #elif defined(__BORLANDC__) // Borland C++ # pragma option pop # pragma nopushoptwarn # pragma nopackwarning #elif defined(__MWERKS__) // Metrowerks CodeWarrior #elif defined(__SUNPRO_CC) // Sun Workshop Compiler C++ #elif defined(__HP_aCC) // HP aCC #elif defined(__MRC__) || defined(__SC__) // MPW MrCpp or SCpp #elif defined(__IBMCPP__) // IBM Visual Age #elif defined(_MSC_VER) // Microsoft Visual C++ // // Must remain the last #elif since some other vendors (Metrowerks, for example) // also #define _MSC_VER # pragma warning (pop) # pragma pack (pop) # if defined(__cplusplus_cli) || defined(__cplusplus_winrt) # if defined(ASIO_CLR_WORKAROUND) # undef generic # undef ASIO_CLR_WORKAROUND # endif # endif #endif galera-4-26.4.25/asio/asio/detail/timer_queue.hpp000644 000164 177776 00000023025 15107057155 022703 0ustar00jenkinsnogroup000000 000000 // // detail/timer_queue.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_HPP #define ASIO_DETAIL_TIMER_QUEUE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/cstdint.hpp" #include "asio/detail/date_time_fwd.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class timer_queue : public timer_queue_base { public: // The time type. typedef typename Time_Traits::time_type time_type; // The duration type. typedef typename Time_Traits::duration_type duration_type; // Per-timer data. class per_timer_data { public: per_timer_data() : heap_index_((std::numeric_limits::max)()), next_(0), prev_(0) { } private: friend class timer_queue; // The operations waiting on the timer. op_queue op_queue_; // The index of the timer in the heap. std::size_t heap_index_; // Pointers to adjacent timers in a linked list. per_timer_data* next_; per_timer_data* prev_; }; // Constructor. timer_queue() : timers_(), heap_() { } // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op) { // Enqueue the timer object. if (timer.prev_ == 0 && &timer != timers_) { if (this->is_positive_infinity(time)) { // No heap entry is required for timers that never expire. timer.heap_index_ = (std::numeric_limits::max)(); } else { // Put the new timer at the correct position in the heap. This is done // first since push_back() can throw due to allocation failure. 
timer.heap_index_ = heap_.size(); heap_entry entry = { time, &timer }; heap_.push_back(entry); up_heap(heap_.size() - 1); } // Insert the new timer into the linked list of active timers. timer.next_ = timers_; timer.prev_ = 0; if (timers_) timers_->prev_ = &timer; timers_ = &timer; } // Enqueue the individual timer operation. timer.op_queue_.push(op); // Interrupt reactor only if newly added timer is first to expire. return timer.heap_index_ == 0 && timer.op_queue_.front() == op; } // Whether there are no timers in the queue. virtual bool empty() const { return timers_ == 0; } // Get the time for the timer that is earliest in the queue. virtual long wait_duration_msec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_msec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Get the time for the timer that is earliest in the queue. virtual long wait_duration_usec(long max_duration) const { if (heap_.empty()) return max_duration; return this->to_usec( Time_Traits::to_posix_duration( Time_Traits::subtract(heap_[0].time_, Time_Traits::now())), max_duration); } // Dequeue all timers not later than the current time. virtual void get_ready_timers(op_queue& ops) { if (!heap_.empty()) { const time_type now = Time_Traits::now(); while (!heap_.empty() && !Time_Traits::less_than(now, heap_[0].time_)) { per_timer_data* timer = heap_[0].timer_; ops.push(timer->op_queue_); remove_timer(*timer); } } } // Dequeue all timers. virtual void get_all_timers(op_queue& ops) { while (timers_) { per_timer_data* timer = timers_; timers_ = timers_->next_; ops.push(timer->op_queue_); timer->next_ = 0; timer->prev_ = 0; } heap_.clear(); } // Cancel and dequeue operations for the given timer. 
std::size_t cancel_timer(per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()) { std::size_t num_cancelled = 0; if (timer.prev_ != 0 || &timer == timers_) { while (wait_op* op = (num_cancelled != max_cancelled) ? timer.op_queue_.front() : 0) { op->ec_ = asio::error::operation_aborted; timer.op_queue_.pop(); ops.push(op); ++num_cancelled; } if (timer.op_queue_.empty()) remove_timer(timer); } return num_cancelled; } // Move operations from one timer to another, empty timer. void move_timer(per_timer_data& target, per_timer_data& source) { target.op_queue_.push(source.op_queue_); target.heap_index_ = source.heap_index_; source.heap_index_ = (std::numeric_limits::max)(); if (target.heap_index_ < heap_.size()) heap_[target.heap_index_].timer_ = ⌖ if (timers_ == &source) timers_ = ⌖ if (source.prev_) source.prev_->next_ = ⌖ if (source.next_) source.next_->prev_= ⌖ target.next_ = source.next_; target.prev_ = source.prev_; source.next_ = 0; source.prev_ = 0; } private: // Move the item at the given index up the heap to its correct position. void up_heap(std::size_t index) { while (index > 0) { std::size_t parent = (index - 1) / 2; if (!Time_Traits::less_than(heap_[index].time_, heap_[parent].time_)) break; swap_heap(index, parent); index = parent; } } // Move the item at the given index down the heap to its correct position. void down_heap(std::size_t index) { std::size_t child = index * 2 + 1; while (child < heap_.size()) { std::size_t min_child = (child + 1 == heap_.size() || Time_Traits::less_than( heap_[child].time_, heap_[child + 1].time_)) ? child : child + 1; if (Time_Traits::less_than(heap_[index].time_, heap_[min_child].time_)) break; swap_heap(index, min_child); index = min_child; child = index * 2 + 1; } } // Swap two entries in the heap. 
void swap_heap(std::size_t index1, std::size_t index2) { heap_entry tmp = heap_[index1]; heap_[index1] = heap_[index2]; heap_[index2] = tmp; heap_[index1].timer_->heap_index_ = index1; heap_[index2].timer_->heap_index_ = index2; } // Remove a timer from the heap and list of timers. void remove_timer(per_timer_data& timer) { // Remove the timer from the heap. std::size_t index = timer.heap_index_; if (!heap_.empty() && index < heap_.size()) { if (index == heap_.size() - 1) { timer.heap_index_ = (std::numeric_limits::max)(); heap_.pop_back(); } else { swap_heap(index, heap_.size() - 1); timer.heap_index_ = (std::numeric_limits::max)(); heap_.pop_back(); if (index > 0 && Time_Traits::less_than( heap_[index].time_, heap_[(index - 1) / 2].time_)) up_heap(index); else down_heap(index); } } // Remove the timer from the linked list of active timers. if (timers_ == &timer) timers_ = timer.next_; if (timer.prev_) timer.prev_->next_ = timer.next_; if (timer.next_) timer.next_->prev_= timer.prev_; timer.next_ = 0; timer.prev_ = 0; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity(const Time_Type&) { return false; } // Determine if the specified absolute time is positive infinity. template static bool is_positive_infinity( const boost::date_time::base_time& time) { return time.is_pos_infinity(); } // Helper function to convert a duration into milliseconds. template long to_msec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t msec = d.total_milliseconds(); if (msec == 0) return 1; if (msec > max_duration) return max_duration; return static_cast(msec); } // Helper function to convert a duration into microseconds. 
template long to_usec(const Duration& d, long max_duration) const { if (d.ticks() <= 0) return 0; int64_t usec = d.total_microseconds(); if (usec == 0) return 1; if (usec > max_duration) return max_duration; return static_cast(usec); } // The head of a linked list of all active timers. per_timer_data* timers_; struct heap_entry { // The time when the timer should fire. time_type time_; // The associated timer with enqueued operations. per_timer_data* timer_; }; // The heap of timers, with the earliest timer at the front. std::vector heap_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TIMER_QUEUE_HPP galera-4-26.4.25/asio/asio/detail/chrono.hpp000644 000164 177776 00000003654 15107057155 021655 0ustar00jenkinsnogroup000000 000000 // // detail/chrono.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CHRONO_HPP #define ASIO_DETAIL_CHRONO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_CHRONO) # include #elif defined(ASIO_HAS_BOOST_CHRONO) # include #endif // defined(ASIO_HAS_BOOST_CHRONO) namespace asio { namespace chrono { #if defined(ASIO_HAS_STD_CHRONO) using std::chrono::duration; using std::chrono::time_point; using std::chrono::duration_cast; using std::chrono::nanoseconds; using std::chrono::microseconds; using std::chrono::milliseconds; using std::chrono::seconds; using std::chrono::minutes; using std::chrono::hours; using std::chrono::time_point_cast; #if defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK) typedef std::chrono::monotonic_clock steady_clock; #else // defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK) using std::chrono::steady_clock; #endif // 
defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK) using std::chrono::system_clock; using std::chrono::high_resolution_clock; #elif defined(ASIO_HAS_BOOST_CHRONO) using boost::chrono::duration; using boost::chrono::time_point; using boost::chrono::duration_cast; using boost::chrono::nanoseconds; using boost::chrono::microseconds; using boost::chrono::milliseconds; using boost::chrono::seconds; using boost::chrono::minutes; using boost::chrono::hours; using boost::chrono::time_point_cast; using boost::chrono::system_clock; using boost::chrono::steady_clock; using boost::chrono::high_resolution_clock; #endif // defined(ASIO_HAS_BOOST_CHRONO) } // namespace chrono } // namespace asio #endif // ASIO_DETAIL_CHRONO_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_io_context.hpp000644 000164 177776 00000025643 15107057155 024431 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_io_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP #define ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/thread.hpp" #include "asio/detail/thread_context.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/detail/win_iocp_operation.hpp" #include "asio/detail/win_iocp_thread_info.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class wait_op; class win_iocp_io_context : public execution_context_service_base, public thread_context { public: // Constructor. Specifies a concurrency hint that is passed through to the // underlying I/O completion port. ASIO_DECL win_iocp_io_context(asio::execution_context& ctx, int concurrency_hint = -1, bool own_thread = true); // Destructor. ASIO_DECL ~win_iocp_io_context(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Initialise the task. Nothing to do here. void init_task() { } // Register a handle with the IO completion port. ASIO_DECL asio::error_code register_handle( HANDLE handle, asio::error_code& ec); // Run the event loop until stopped or no more work. ASIO_DECL size_t run(asio::error_code& ec); // Run until stopped or one operation is performed. ASIO_DECL size_t run_one(asio::error_code& ec); // Run until timeout, interrupted, or one operation is performed. ASIO_DECL size_t wait_one(long usec, asio::error_code& ec); // Poll for operations without blocking. 
ASIO_DECL size_t poll(asio::error_code& ec); // Poll for one operation without blocking. ASIO_DECL size_t poll_one(asio::error_code& ec); // Stop the event processing loop. ASIO_DECL void stop(); // Determine whether the io_context is stopped. bool stopped() const { return ::InterlockedExchangeAdd(&stopped_, 0) != 0; } // Restart in preparation for a subsequent run invocation. void restart() { ::InterlockedExchange(&stopped_, 0); } // Notify that some work has started. void work_started() { ::InterlockedIncrement(&outstanding_work_); } // Notify that some work has finished. void work_finished() { if (::InterlockedDecrement(&outstanding_work_) == 0) stop(); } // Return whether a handler can be dispatched immediately. bool can_dispatch() { return thread_call_stack::contains(this) != 0; } // Request invocation of the given operation and return immediately. Assumes // that work_started() has not yet been called for the operation. void post_immediate_completion(win_iocp_operation* op, bool) { work_started(); post_deferred_completion(op); } // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operation. ASIO_DECL void post_deferred_completion(win_iocp_operation* op); // Request invocation of the given operation and return immediately. Assumes // that work_started() was previously called for the operations. ASIO_DECL void post_deferred_completions( op_queue& ops); // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() has not yet been // called for the operation. void post_private_immediate_completion(win_iocp_operation* op) { post_immediate_completion(op, false); } // Request invocation of the given operation using the thread-private queue // and return immediately. Assumes that work_started() was previously called // for the operation. 
void post_private_deferred_completion(win_iocp_operation* op) { post_deferred_completion(op); } // Enqueue the given operation following a failed attempt to dispatch the // operation for immediate invocation. void do_dispatch(operation* op) { post_immediate_completion(op, false); } // Process unfinished operations as part of a shutdown operation. Assumes // that work_started() was previously called for the operations. ASIO_DECL void abandon_operations(op_queue& ops); // Called after starting an overlapped I/O operation that did not complete // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_pending(win_iocp_operation* op); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, DWORD last_error = 0, DWORD bytes_transferred = 0); // Called after starting an overlapped I/O operation that completed // immediately. The caller must have already called work_started() prior to // starting the operation. ASIO_DECL void on_completion(win_iocp_operation* op, const asio::error_code& ec, DWORD bytes_transferred = 0); // Add a new timer queue to the service. template void add_timer_queue(timer_queue& timer_queue); // Remove a timer queue from the service. template void remove_timer_queue(timer_queue& timer_queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer associated with the given token. Returns the number of // handlers that have been posted or dispatched. 
template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& to, typename timer_queue::per_timer_data& from); // Get the concurrency hint that was used to initialise the io_context. int concurrency_hint() const { return concurrency_hint_; } private: #if defined(WINVER) && (WINVER < 0x0500) typedef DWORD dword_ptr_t; typedef ULONG ulong_ptr_t; #else // defined(WINVER) && (WINVER < 0x0500) typedef DWORD_PTR dword_ptr_t; typedef ULONG_PTR ulong_ptr_t; #endif // defined(WINVER) && (WINVER < 0x0500) // Dequeues at most one operation from the I/O completion port, and then // executes it. Returns the number of operations that were dequeued (i.e. // either 0 or 1). ASIO_DECL size_t do_one(DWORD msec, asio::error_code& ec); // Helper to calculate the GetQueuedCompletionStatus timeout. ASIO_DECL static DWORD get_gqcs_timeout(); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Called to recalculate and update the timeout. ASIO_DECL void update_timeout(); // Helper class to call work_finished() on block exit. struct work_finished_on_block_exit; // Helper class for managing a HANDLE. struct auto_handle { HANDLE handle; auto_handle() : handle(0) {} ~auto_handle() { if (handle) ::CloseHandle(handle); } }; // The IO completion port used for queueing operations. auto_handle iocp_; // The count of unfinished work. long outstanding_work_; // Flag to indicate whether the event loop has been stopped. mutable long stopped_; // Flag to indicate whether there is an in-flight stop event. 
Every event // posted using PostQueuedCompletionStatus consumes non-paged pool, so to // avoid exhausting this resouce we limit the number of outstanding events. long stop_event_posted_; // Flag to indicate whether the service has been shut down. long shutdown_; enum { // Timeout to use with GetQueuedCompletionStatus on older versions of // Windows. Some versions of windows have a "bug" where a call to // GetQueuedCompletionStatus can appear stuck even though there are events // waiting on the queue. Using a timeout helps to work around the issue. default_gqcs_timeout = 500, // Maximum waitable timer timeout, in milliseconds. max_timeout_msec = 5 * 60 * 1000, // Maximum waitable timer timeout, in microseconds. max_timeout_usec = max_timeout_msec * 1000, // Completion key value used to wake up a thread to dispatch timers or // completed operations. wake_for_dispatch = 1, // Completion key value to indicate that an operation has posted with the // original last_error and bytes_transferred values stored in the fields of // the OVERLAPPED structure. overlapped_contains_result = 2 }; // Timeout to use with GetQueuedCompletionStatus. const DWORD gqcs_timeout_; // Helper class to run the scheduler in its own thread. struct thread_function; friend struct thread_function; // Function object for processing timeouts in a background thread. struct timer_thread_function; friend struct timer_thread_function; // Background thread used for processing timeouts. scoped_ptr timer_thread_; // A waitable timer object used for waiting for timeouts. auto_handle waitable_timer_; // Non-zero if timers or completed operations need to be dispatched. long dispatch_required_; // Mutex for protecting access to the timer queues and completed operations. mutex dispatch_mutex_; // The timer queues. timer_queue_set timer_queues_; // The operations that are ready to dispatch. op_queue completed_ops_; // The concurrency hint used to initialise the io_context. 
const int concurrency_hint_; // The thread that is running the io_context. scoped_ptr thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/win_iocp_io_context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_io_context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_IO_CONTEXT_HPP galera-4-26.4.25/asio/asio/detail/timer_scheduler.hpp000644 000164 177776 00000002045 15107057155 023534 0ustar00jenkinsnogroup000000 000000 // // detail/timer_scheduler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_SCHEDULER_HPP #define ASIO_DETAIL_TIMER_SCHEDULER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/timer_scheduler_fwd.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_timer_scheduler.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #elif defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_TIMER_SCHEDULER_HPP galera-4-26.4.25/asio/asio/detail/socket_option.hpp000644 000164 177776 00000014214 15107057155 023237 0ustar00jenkinsnogroup000000 000000 // // detail/socket_option.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_OPTION_HPP #define ASIO_DETAIL_SOCKET_OPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { namespace socket_option { // Helper template for implementing boolean-based options. template class boolean { public: // Default constructor. boolean() : value_(0) { } // Construct with a specific option value. explicit boolean(bool v) : value_(v ? 1 : 0) { } // Set the current value of the boolean. boolean& operator=(bool v) { value_ = v ? 1 : 0; return *this; } // Get the current value of the boolean. bool value() const { return !!value_; } // Convert to bool. operator bool() const { return !!value_; } // Test for false. bool operator!() const { return !value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the boolean data. template int* data(const Protocol&) { return &value_; } // Get the address of the boolean data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the boolean data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the boolean data. template void resize(const Protocol&, std::size_t s) { // On some platforms (e.g. Windows Vista), the getsockopt function will // return the size of a boolean socket option as one byte, even though a // four byte integer was passed in. switch (s) { case sizeof(char): value_ = *reinterpret_cast(&value_) ? 
1 : 0; break; case sizeof(value_): break; default: { std::length_error ex("boolean socket option resize"); asio::detail::throw_exception(ex); } } } private: int value_; }; // Helper template for implementing integer options. template class integer { public: // Default constructor. integer() : value_(0) { } // Construct with a specific option value. explicit integer(int v) : value_(v) { } // Set the value of the int option. integer& operator=(int v) { value_ = v; return *this; } // Get the current value of the int option. int value() const { return value_; } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the int data. template int* data(const Protocol&) { return &value_; } // Get the address of the int data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the int data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("integer socket option resize"); asio::detail::throw_exception(ex); } } private: int value_; }; // Helper template for implementing linger options. template class linger { public: // Default constructor. linger() { value_.l_onoff = 0; value_.l_linger = 0; } // Construct with specific option values. linger(bool e, int t) { enabled(e); timeout ASIO_PREVENT_MACRO_SUBSTITUTION(t); } // Set the value for whether linger is enabled. void enabled(bool value) { value_.l_onoff = value ? 1 : 0; } // Get the value for whether linger is enabled. bool enabled() const { return value_.l_onoff != 0; } // Set the value for the linger timeout. 
void timeout ASIO_PREVENT_MACRO_SUBSTITUTION(int value) { #if defined(WIN32) value_.l_linger = static_cast(value); #else value_.l_linger = value; #endif } // Get the value for the linger timeout. int timeout ASIO_PREVENT_MACRO_SUBSTITUTION() const { return static_cast(value_.l_linger); } // Get the level of the socket option. template int level(const Protocol&) const { return Level; } // Get the name of the socket option. template int name(const Protocol&) const { return Name; } // Get the address of the linger data. template detail::linger_type* data(const Protocol&) { return &value_; } // Get the address of the linger data. template const detail::linger_type* data(const Protocol&) const { return &value_; } // Get the size of the linger data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the int data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("linger socket option resize"); asio::detail::throw_exception(ex); } } private: detail::linger_type value_; }; } // namespace socket_option } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_OPTION_HPP galera-4-26.4.25/asio/asio/detail/fenced_block.hpp000644 000164 177776 00000005517 15107057155 022763 0ustar00jenkinsnogroup000000 000000 // // detail/fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FENCED_BLOCK_HPP #define ASIO_DETAIL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) # include "asio/detail/null_fenced_block.hpp" #elif defined(ASIO_HAS_STD_ATOMIC) # include "asio/detail/std_fenced_block.hpp" #elif defined(__MACH__) && defined(__APPLE__) # include "asio/detail/macos_fenced_block.hpp" #elif defined(__sun) # include "asio/detail/solaris_fenced_block.hpp" #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) # include "asio/detail/gcc_arm_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__hppa) || defined(__hppa__)) # include "asio/detail/gcc_hppa_fenced_block.hpp" #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # include "asio/detail/gcc_x86_fenced_block.hpp" #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) # include "asio/detail/gcc_sync_fenced_block.hpp" #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) # include "asio/detail/win_fenced_block.hpp" #else # include "asio/detail/null_fenced_block.hpp" #endif namespace asio { namespace detail { #if !defined(ASIO_HAS_THREADS) \ || defined(ASIO_DISABLE_FENCED_BLOCK) typedef null_fenced_block fenced_block; #elif defined(ASIO_HAS_STD_ATOMIC) typedef std_fenced_block fenced_block; #elif defined(__MACH__) && defined(__APPLE__) typedef macos_fenced_block fenced_block; #elif defined(__sun) typedef solaris_fenced_block fenced_block; #elif defined(__GNUC__) && defined(__arm__) \ && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) typedef gcc_arm_fenced_block fenced_block; #elif defined(__GNUC__) && 
(defined(__hppa) || defined(__hppa__)) typedef gcc_hppa_fenced_block fenced_block; #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) typedef gcc_x86_fenced_block fenced_block; #elif defined(__GNUC__) \ && ((__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || (__GNUC__ > 4)) \ && !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !defined(__ICC) && !defined(__ECC) && !defined(__PATHSCALE__) typedef gcc_sync_fenced_block fenced_block; #elif defined(ASIO_WINDOWS) && !defined(UNDER_CE) typedef win_fenced_block fenced_block; #else typedef null_fenced_block fenced_block; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/tss_ptr.hpp000644 000164 177776 00000003260 15107057155 022054 0ustar00jenkinsnogroup000000 000000 // // detail/tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TSS_PTR_HPP #define ASIO_DETAIL_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_tss_ptr.hpp" #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) # include "asio/detail/keyword_tss_ptr.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_tss_ptr.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_tss_ptr.hpp" #else # error Only Windows and POSIX are supported! 
#endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class tss_ptr #if !defined(ASIO_HAS_THREADS) : public null_tss_ptr #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) : public keyword_tss_ptr #elif defined(ASIO_WINDOWS) : public win_tss_ptr #elif defined(ASIO_HAS_PTHREADS) : public posix_tss_ptr #endif { public: void operator=(T* value) { #if !defined(ASIO_HAS_THREADS) null_tss_ptr::operator=(value); #elif defined(ASIO_HAS_THREAD_KEYWORD_EXTENSION) keyword_tss_ptr::operator=(value); #elif defined(ASIO_WINDOWS) win_tss_ptr::operator=(value); #elif defined(ASIO_HAS_PTHREADS) posix_tss_ptr::operator=(value); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_TSS_PTR_HPP galera-4-26.4.25/asio/asio/detail/posix_event.hpp000644 000164 177776 00000007670 15107057155 022732 0ustar00jenkinsnogroup000000 000000 // // detail/posix_event.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_EVENT_HPP #define ASIO_DETAIL_POSIX_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class posix_event : private noncopyable { public: // Constructor. ASIO_DECL posix_event(); // Destructor. ~posix_event() { ::pthread_cond_destroy(&cond_); } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. 
template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::pthread_cond_broadcast(&cond_); // Ignore EINVAL. } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::pthread_cond_signal(&cond_); // Ignore EINVAL. } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::pthread_cond_signal(&cond_); // Ignore EINVAL. return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; ::pthread_cond_wait(&cond_, &lock.mutex().mutex_); // Ignore EINVAL. state_ -= 2; } } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock& lock, long usec) { ASIO_ASSERT(lock.locked()); if ((state_ & 1) == 0) { state_ += 2; timespec ts; #if (defined(__MACH__) && defined(__APPLE__)) \ || (defined(__ANDROID__) && (__ANDROID_API__ < 21) \ && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE)) ts.tv_sec = usec / 1000000; ts.tv_nsec = (usec % 1000000) * 1000; ::pthread_cond_timedwait_relative_np( &cond_, &lock.mutex().mutex_, &ts); // Ignore EINVAL. #else // (defined(__MACH__) && defined(__APPLE__)) // || (defined(__ANDROID__) && (__ANDROID_API__ < 21) // && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE)) if (::clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { ts.tv_sec += usec / 1000000; ts.tv_nsec += (usec % 1000000) * 1000; ts.tv_sec += ts.tv_nsec / 1000000000; ts.tv_nsec = ts.tv_nsec % 1000000000; ::pthread_cond_timedwait(&cond_, &lock.mutex().mutex_, &ts); // Ignore EINVAL. 
} #endif // (defined(__MACH__) && defined(__APPLE__)) // || (defined(__ANDROID__) && (__ANDROID_API__ < 21) // && defined(HAVE_PTHREAD_COND_TIMEDWAIT_RELATIVE)) state_ -= 2; } return (state_ & 1) != 0; } private: ::pthread_cond_t cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_EVENT_HPP galera-4-26.4.25/asio/asio/detail/throw_exception.hpp000644 000164 177776 00000002577 15107057155 023611 0ustar00jenkinsnogroup000000 000000 // // detail/throw_exception.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_EXCEPTION_HPP #define ASIO_DETAIL_THROW_EXCEPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # include #endif // defined(ASIO_BOOST_THROW_EXCEPTION) namespace asio { namespace detail { #if defined(ASIO_HAS_BOOST_THROW_EXCEPTION) using boost::throw_exception; #else // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Declare the throw_exception function for all targets. template void throw_exception(const Exception& e); // Only define the throw_exception function when exceptions are enabled. // Otherwise, it is up to the application to provide a definition of this // function. 
# if !defined(ASIO_NO_EXCEPTIONS) template void throw_exception(const Exception& e) { throw e; } # endif // !defined(ASIO_NO_EXCEPTIONS) #endif // defined(ASIO_HAS_BOOST_THROW_EXCEPTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_THROW_EXCEPTION_HPP galera-4-26.4.25/asio/asio/detail/service_registry.hpp000644 000164 177776 00000012461 15107057155 023751 0ustar00jenkinsnogroup000000 000000 // // detail/service_registry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SERVICE_REGISTRY_HPP #define ASIO_DETAIL_SERVICE_REGISTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { class io_context; namespace detail { template class typeid_wrapper {}; class service_registry : private noncopyable { public: // Constructor. ASIO_DECL service_registry(execution_context& owner); // Destructor. ASIO_DECL ~service_registry(); // Shutdown all services. ASIO_DECL void shutdown_services(); // Destroy all services. ASIO_DECL void destroy_services(); // Notify all services of a fork event. ASIO_DECL void notify_fork(execution_context::fork_event fork_ev); // Get the service object corresponding to the specified service type. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. template Service& use_service(); // Get the service object corresponding to the specified service type. 
Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. // This overload is used for backwards compatibility with services that // inherit from io_context::service. template Service& use_service(io_context& owner); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. template void add_service(Service* new_service); // Check whether a service object of the specified type already exists. template bool has_service() const; private: // Initalise a service's key when the key_type typedef is not available. template static void init_key(execution_context::service::key& key, ...); #if !defined(ASIO_NO_TYPEID) // Initalise a service's key when the key_type typedef is available. template static void init_key(execution_context::service::key& key, typename enable_if< is_base_of::value>::type*); #endif // !defined(ASIO_NO_TYPEID) // Initialise a service's key based on its id. ASIO_DECL static void init_key_from_id( execution_context::service::key& key, const execution_context::id& id); #if !defined(ASIO_NO_TYPEID) // Initialise a service's key based on its id. template static void init_key_from_id(execution_context::service::key& key, const service_id& /*id*/); #endif // !defined(ASIO_NO_TYPEID) // Check if a service matches the given id. ASIO_DECL static bool keys_match( const execution_context::service::key& key1, const execution_context::service::key& key2); // The type of a factory function used for creating a service instance. typedef execution_context::service*(*factory_type)(void*); // Factory function for creating a service instance. template static execution_context::service* create(void* owner); // Destroy a service instance. ASIO_DECL static void destroy(execution_context::service* service); // Helper class to manage service pointers. 
struct auto_service_ptr; friend struct auto_service_ptr; struct auto_service_ptr { execution_context::service* ptr_; ~auto_service_ptr() { destroy(ptr_); } }; // Get the service object corresponding to the specified service key. Will // create a new service object automatically if no such object already // exists. Ownership of the service object is not transferred to the caller. ASIO_DECL execution_context::service* do_use_service( const execution_context::service::key& key, factory_type factory, void* owner); // Add a service object. Throws on error, in which case ownership of the // object is retained by the caller. ASIO_DECL void do_add_service( const execution_context::service::key& key, execution_context::service* new_service); // Check whether a service object with the specified key already exists. ASIO_DECL bool do_has_service( const execution_context::service::key& key) const; // Mutex to protect access to internal data. mutable asio::detail::mutex mutex_; // The owner of this service registry and the services it contains. execution_context& owner_; // The first service in the list of contained services. execution_context::service* first_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/service_registry.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/service_registry.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_SERVICE_REGISTRY_HPP galera-4-26.4.25/asio/asio/detail/bind_handler.hpp000644 000164 177776 00000054747 15107057155 023007 0ustar00jenkinsnogroup000000 000000 // // detail/bind_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BIND_HANDLER_HPP #define ASIO_DETAIL_BIND_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class binder1 { public: template binder1(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1) : handler_(ASIO_MOVE_CAST(T)(handler)), arg1_(arg1) { } binder1(Handler& handler, const Arg1& arg1) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1) { } #if defined(ASIO_HAS_MOVE) binder1(const binder1& other) : handler_(other.handler_), arg1_(other.arg1_) { } binder1(binder1&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(static_cast(arg1_)); } void operator()() const { handler_(arg1_); } //private: Handler handler_; Arg1 arg1_; }; template inline void* asio_handler_allocate(std::size_t size, binder1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder1* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline 
void asio_handler_invoke(const Function& function, binder1* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder1::type, Arg1> bind_handler( ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1) { return binder1::type, Arg1>(0, ASIO_MOVE_CAST(Handler)(handler), arg1); } template class binder2 { public: template binder2(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1, const Arg2& arg2) : handler_(ASIO_MOVE_CAST(T)(handler)), arg1_(arg1), arg2_(arg2) { } binder2(Handler& handler, const Arg1& arg1, const Arg2& arg2) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2) { } #if defined(ASIO_HAS_MOVE) binder2(const binder2& other) : handler_(other.handler_), arg1_(other.arg1_), arg2_(other.arg2_) { } binder2(binder2&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)), arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(static_cast(arg1_), static_cast(arg2_)); } void operator()() const { handler_(arg1_, arg2_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; }; template inline void* asio_handler_allocate(std::size_t size, binder2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder2* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder2* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template 
inline binder2::type, Arg1, Arg2> bind_handler( ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2) { return binder2::type, Arg1, Arg2>(0, ASIO_MOVE_CAST(Handler)(handler), arg1, arg2); } template class binder3 { public: template binder3(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(ASIO_MOVE_CAST(T)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } binder3(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3) { } #if defined(ASIO_HAS_MOVE) binder3(const binder3& other) : handler_(other.handler_), arg1_(other.arg1_), arg2_(other.arg2_), arg3_(other.arg3_) { } binder3(binder3&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)), arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)), arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_)); } void operator()() const { handler_(arg1_, arg2_, arg3_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; }; template inline void* asio_handler_allocate(std::size_t size, binder3* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder3* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder3* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder3* this_handler) { asio_handler_invoke_helpers::invoke( function, 
this_handler->handler_); } template inline binder3::type, Arg1, Arg2, Arg3> bind_handler( ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { return binder3::type, Arg1, Arg2, Arg3>(0, ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3); } template class binder4 { public: template binder4(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(ASIO_MOVE_CAST(T)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } binder4(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4) { } #if defined(ASIO_HAS_MOVE) binder4(const binder4& other) : handler_(other.handler_), arg1_(other.arg1_), arg2_(other.arg2_), arg3_(other.arg3_), arg4_(other.arg4_) { } binder4(binder4&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)), arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)), arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_)), arg4_(ASIO_MOVE_CAST(Arg4)(other.arg4_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; }; template inline void* asio_handler_allocate(std::size_t size, binder4* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder4* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder4* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, 
binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder4* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder4::type, Arg1, Arg2, Arg3, Arg4> bind_handler(ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { return binder4::type, Arg1, Arg2, Arg3, Arg4>(0, ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3, arg4); } template class binder5 { public: template binder5(int, ASIO_MOVE_ARG(T) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(ASIO_MOVE_CAST(T)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } binder5(Handler& handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(arg2), arg3_(arg3), arg4_(arg4), arg5_(arg5) { } #if defined(ASIO_HAS_MOVE) binder5(const binder5& other) : handler_(other.handler_), arg1_(other.arg1_), arg2_(other.arg2_), arg3_(other.arg3_), arg4_(other.arg4_), arg5_(other.arg5_) { } binder5(binder5&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)), arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)), arg3_(ASIO_MOVE_CAST(Arg3)(other.arg3_)), arg4_(ASIO_MOVE_CAST(Arg4)(other.arg4_)), arg5_(ASIO_MOVE_CAST(Arg5)(other.arg5_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(static_cast(arg1_), static_cast(arg2_), static_cast(arg3_), static_cast(arg4_), static_cast(arg5_)); } void operator()() const { handler_(arg1_, arg2_, arg3_, arg4_, arg5_); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; Arg3 arg3_; Arg4 arg4_; Arg5 arg5_; }; template inline void* asio_handler_allocate(std::size_t size, binder5* this_handler) { return asio_handler_alloc_helpers::allocate( 
size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, binder5* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( binder5* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, binder5* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline binder5::type, Arg1, Arg2, Arg3, Arg4, Arg5> bind_handler(ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { return binder5::type, Arg1, Arg2, Arg3, Arg4, Arg5>(0, ASIO_MOVE_CAST(Handler)(handler), arg1, arg2, arg3, arg4, arg5); } #if defined(ASIO_HAS_MOVE) template class move_binder1 { public: move_binder1(int, ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Arg1) arg1) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(ASIO_MOVE_CAST(Arg1)(arg1)) { } move_binder1(move_binder1&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)) { } void operator()() { handler_(ASIO_MOVE_CAST(Arg1)(arg1_)); } //private: Handler handler_; Arg1 arg1_; }; template inline void* asio_handler_allocate(std::size_t size, move_binder1* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, move_binder1* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( move_binder1* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } 
template inline void asio_handler_invoke(ASIO_MOVE_ARG(Function) function, move_binder1* this_handler) { asio_handler_invoke_helpers::invoke( ASIO_MOVE_CAST(Function)(function), this_handler->handler_); } template class move_binder2 { public: move_binder2(int, ASIO_MOVE_ARG(Handler) handler, const Arg1& arg1, ASIO_MOVE_ARG(Arg2) arg2) : handler_(ASIO_MOVE_CAST(Handler)(handler)), arg1_(arg1), arg2_(ASIO_MOVE_CAST(Arg2)(arg2)) { } move_binder2(move_binder2&& other) : handler_(ASIO_MOVE_CAST(Handler)(other.handler_)), arg1_(ASIO_MOVE_CAST(Arg1)(other.arg1_)), arg2_(ASIO_MOVE_CAST(Arg2)(other.arg2_)) { } void operator()() { handler_(static_cast(arg1_), ASIO_MOVE_CAST(Arg2)(arg2_)); } //private: Handler handler_; Arg1 arg1_; Arg2 arg2_; }; template inline void* asio_handler_allocate(std::size_t size, move_binder2* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, move_binder2* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( move_binder2* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template inline void asio_handler_invoke(ASIO_MOVE_ARG(Function) function, move_binder2* this_handler) { asio_handler_invoke_helpers::invoke( ASIO_MOVE_CAST(Function)(function), this_handler->handler_); } #endif // defined(ASIO_HAS_MOVE) } // namespace detail template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::binder1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::binder2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return 
associated_allocator::get(h.handler_, a); } }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get(const detail::binder1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get(const detail::binder2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #if defined(ASIO_HAS_MOVE) template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::move_binder1& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_allocator< detail::move_binder2, Allocator> { typedef typename associated_allocator::type type; static type get(const detail::move_binder2& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get(const detail::move_binder1& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; template struct associated_executor, Executor> { typedef typename associated_executor::type type; static type get(const detail::move_binder2& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; #endif // defined(ASIO_HAS_MOVE) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BIND_HANDLER_HPP galera-4-26.4.25/asio/asio/detail/winsock_init.hpp000644 000164 177776 00000006022 15107057155 023055 0ustar00jenkinsnogroup000000 000000 // // detail/winsock_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINSOCK_INIT_HPP #define ASIO_DETAIL_WINSOCK_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winsock_init_base { protected: // Structure to track result of initialisation and number of uses. POD is used // to ensure that the values are zero-initialised prior to any code being run. struct data { long init_count_; long result_; }; ASIO_DECL static void startup(data& d, unsigned char major, unsigned char minor); ASIO_DECL static void manual_startup(data& d); ASIO_DECL static void cleanup(data& d); ASIO_DECL static void manual_cleanup(data& d); ASIO_DECL static void throw_on_error(data& d); }; template class winsock_init : private winsock_init_base { public: winsock_init(bool allow_throw = true) { startup(data_, Major, Minor); if (allow_throw) throw_on_error(data_); } winsock_init(const winsock_init&) { startup(data_, Major, Minor); throw_on_error(data_); } ~winsock_init() { cleanup(data_); } // This class may be used to indicate that user code will manage Winsock // initialisation and cleanup. This may be required in the case of a DLL, for // example, where it is not safe to initialise Winsock from global object // constructors. // // To prevent asio from initialising Winsock, the object must be constructed // before any Asio's own global objects. 
With MSVC, this may be accomplished // by adding the following code to the DLL: // // #pragma warning(push) // #pragma warning(disable:4073) // #pragma init_seg(lib) // asio::detail::winsock_init<>::manual manual_winsock_init; // #pragma warning(pop) class manual { public: manual() { manual_startup(data_); } manual(const manual&) { manual_startup(data_); } ~manual() { manual_cleanup(data_); } }; private: friend class manual; static data data_; }; template winsock_init_base::data winsock_init::data_; // Static variable to ensure that winsock is initialised before main, and // therefore before any other threads can get started. static const winsock_init<>& winsock_init_instance = winsock_init<>(false); } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winsock_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WINSOCK_INIT_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_serial_port_service.hpp000644 000164 177776 00000016605 15107057155 026317 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_serial_port_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #define ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #include #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Extend win_iocp_handle_service to provide serial port support. class win_iocp_serial_port_service : public execution_context_service_base { public: // The native type of a serial port. typedef win_iocp_handle_service::native_handle_type native_handle_type; // The implementation type of the serial port. typedef win_iocp_handle_service::implementation_type implementation_type; // Constructor. ASIO_DECL win_iocp_serial_port_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Construct a new serial port implementation. void construct(implementation_type& impl) { handle_service_.construct(impl); } // Move-construct a new serial port implementation. void move_construct(implementation_type& impl, implementation_type& other_impl) { handle_service_.move_construct(impl, other_impl); } // Move-assign from another serial port implementation. void move_assign(implementation_type& impl, win_iocp_serial_port_service& other_service, implementation_type& other_impl) { handle_service_.move_assign(impl, other_service.handle_service_, other_impl); } // Destroy a serial port implementation. void destroy(implementation_type& impl) { handle_service_.destroy(impl); } // Open the serial port using the specified device name. 
ASIO_DECL asio::error_code open(implementation_type& impl, const std::string& device, asio::error_code& ec); // Assign a native handle to a serial port implementation. asio::error_code assign(implementation_type& impl, const native_handle_type& handle, asio::error_code& ec) { return handle_service_.assign(impl, handle, ec); } // Determine whether the serial port is open. bool is_open(const implementation_type& impl) const { return handle_service_.is_open(impl); } // Destroy a serial port implementation. asio::error_code close(implementation_type& impl, asio::error_code& ec) { return handle_service_.close(impl, ec); } // Get the native serial port representation. native_handle_type native_handle(implementation_type& impl) { return handle_service_.native_handle(impl); } // Cancel all operations associated with the handle. asio::error_code cancel(implementation_type& impl, asio::error_code& ec) { return handle_service_.cancel(impl, ec); } // Set an option on the serial port. template asio::error_code set_option(implementation_type& impl, const SettableSerialPortOption& option, asio::error_code& ec) { return do_set_option(impl, &win_iocp_serial_port_service::store_option, &option, ec); } // Get an option from the serial port. template asio::error_code get_option(const implementation_type& impl, GettableSerialPortOption& option, asio::error_code& ec) const { return do_get_option(impl, &win_iocp_serial_port_service::load_option, &option, ec); } // Send a break sequence to the serial port. asio::error_code send_break(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Write the given data. Returns the number of bytes sent. template size_t write_some(implementation_type& impl, const ConstBufferSequence& buffers, asio::error_code& ec) { return handle_service_.write_some(impl, buffers, ec); } // Start an asynchronous write. The data being written must be valid for the // lifetime of the asynchronous operation. 
template void async_write_some(implementation_type& impl, const ConstBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { handle_service_.async_write_some(impl, buffers, handler, io_ex); } // Read some data. Returns the number of bytes received. template size_t read_some(implementation_type& impl, const MutableBufferSequence& buffers, asio::error_code& ec) { return handle_service_.read_some(impl, buffers, ec); } // Start an asynchronous read. The buffer for the data being received must be // valid for the lifetime of the asynchronous operation. template void async_read_some(implementation_type& impl, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) { handle_service_.async_read_some(impl, buffers, handler, io_ex); } private: // Function pointer type for storing a serial port option. typedef asio::error_code (*store_function_type)( const void*, ::DCB&, asio::error_code&); // Helper function template to store a serial port option. template static asio::error_code store_option(const void* option, ::DCB& storage, asio::error_code& ec) { static_cast(option)->store(storage, ec); return ec; } // Helper function to set a serial port option. ASIO_DECL asio::error_code do_set_option( implementation_type& impl, store_function_type store, const void* option, asio::error_code& ec); // Function pointer type for loading a serial port option. typedef asio::error_code (*load_function_type)( void*, const ::DCB&, asio::error_code&); // Helper function template to load a serial port option. template static asio::error_code load_option(void* option, const ::DCB& storage, asio::error_code& ec) { static_cast(option)->load(storage, ec); return ec; } // Helper function to get a serial port option. ASIO_DECL asio::error_code do_get_option( const implementation_type& impl, load_function_type load, void* option, asio::error_code& ec) const; // The implementation used for initiating asynchronous operations. 
win_iocp_handle_service handle_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_iocp_serial_port_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT) #endif // ASIO_DETAIL_WIN_IOCP_SERIAL_PORT_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/cstddef.hpp000644 000164 177776 00000001337 15107057155 021775 0ustar00jenkinsnogroup000000 000000 // // detail/cstddef.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CSTDDEF_HPP #define ASIO_DETAIL_CSTDDEF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include namespace asio { #if defined(ASIO_HAS_NULLPTR) using std::nullptr_t; #else // defined(ASIO_HAS_NULLPTR) struct nullptr_t {}; #endif // defined(ASIO_HAS_NULLPTR) } // namespace asio #endif // ASIO_DETAIL_CSTDDEF_HPP galera-4-26.4.25/asio/asio/detail/wrapped_handler.hpp000644 000164 177776 00000017705 15107057155 023526 0ustar00jenkinsnogroup000000 000000 // // detail/wrapped_handler.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WRAPPED_HANDLER_HPP #define ASIO_DETAIL_WRAPPED_HANDLER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/bind_handler.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_cont_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct is_continuation_delegated { template bool operator()(Dispatcher&, Handler& handler) const { return asio_handler_cont_helpers::is_continuation(handler); } }; struct is_continuation_if_running { template bool operator()(Dispatcher& dispatcher, Handler&) const { return dispatcher.running_in_this_thread(); } }; template class wrapped_handler { public: typedef void result_type; wrapped_handler(Dispatcher dispatcher, Handler& handler) : dispatcher_(dispatcher), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) wrapped_handler(const wrapped_handler& other) : dispatcher_(other.dispatcher_), handler_(other.handler_) { } wrapped_handler(wrapped_handler&& other) : dispatcher_(other.dispatcher_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { dispatcher_.dispatch(ASIO_MOVE_CAST(Handler)(handler_)); } void operator()() const { dispatcher_.dispatch(handler_); } template void operator()(const Arg1& arg1) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1)); } template void operator()(const Arg1& arg1, const Arg2& arg2) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void operator()(const Arg1& arg1, const Arg2& arg2) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2)); } template void 
operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3) const { dispatcher_.dispatch(detail::bind_handler(handler_, arg1, arg2, arg3)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } template void operator()(const Arg1& arg1, const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5) const { dispatcher_.dispatch( detail::bind_handler(handler_, arg1, arg2, arg3, arg4, arg5)); } //private: Dispatcher dispatcher_; Handler handler_; }; template class rewrapped_handler { public: explicit rewrapped_handler(Handler& handler, const Context& context) : context_(context), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } explicit rewrapped_handler(const Handler& handler, const Context& context) : context_(context), handler_(handler) { } #if defined(ASIO_HAS_MOVE) rewrapped_handler(const rewrapped_handler& other) : context_(other.context_), handler_(other.handler_) { } rewrapped_handler(rewrapped_handler&& other) : context_(ASIO_MOVE_CAST(Context)(other.context_)), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()() { handler_(); } void operator()() const { handler_(); } //private: Context context_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, wrapped_handler* this_handler) { return 
asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, wrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( wrapped_handler* this_handler) { return IsContinuation()(this_handler->dispatcher_, this_handler->handler_); } template inline void asio_handler_invoke(Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void asio_handler_invoke(const Function& function, wrapped_handler* this_handler) { this_handler->dispatcher_.dispatch( rewrapped_handler( function, this_handler->handler_)); } template inline void* asio_handler_allocate(std::size_t size, rewrapped_handler* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->context_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, rewrapped_handler* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->context_); } template inline bool asio_handler_is_continuation( rewrapped_handler* this_handler) { return asio_handler_cont_helpers::is_continuation( this_handler->context_); } template inline void asio_handler_invoke(Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->context_); } template inline void asio_handler_invoke(const Function& function, rewrapped_handler* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->context_); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WRAPPED_HANDLER_HPP galera-4-26.4.25/asio/asio/detail/reactive_null_buffers_op.hpp000644 000164 177776 00000005537 15107057155 025435 0ustar00jenkinsnogroup000000 000000 // // 
detail/reactive_null_buffers_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #define ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_null_buffers_op : public reactor_op { public: ASIO_DEFINE_HANDLER_PTR(reactive_null_buffers_op); reactive_null_buffers_op(Handler& handler, const IoExecutor& io_ex) : reactor_op(&reactive_null_buffers_op::do_perform, &reactive_null_buffers_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static status do_perform(reactor_op*) { return done; } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_null_buffers_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_NULL_BUFFERS_OP_HPP galera-4-26.4.25/asio/asio/detail/win_fd_set_adapter.hpp000644 000164 177776 00000007336 15107057155 024207 0ustar00jenkinsnogroup000000 000000 // // detail/win_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. 
class win_fd_set_adapter : noncopyable { public: enum { default_fd_set_size = 1024 }; win_fd_set_adapter() : capacity_(default_fd_set_size), max_descriptor_(invalid_socket) { fd_set_ = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (capacity_))); fd_set_->fd_count = 0; } ~win_fd_set_adapter() { ::operator delete(fd_set_); } void reset() { fd_set_->fd_count = 0; max_descriptor_ = invalid_socket; } bool set(socket_type descriptor) { for (u_int i = 0; i < fd_set_->fd_count; ++i) if (fd_set_->fd_array[i] == descriptor) return true; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = descriptor; return true; } void set(reactor_op_queue& operations, op_queue&) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; reserve(fd_set_->fd_count + 1); fd_set_->fd_array[fd_set_->fd_count++] = op_iter->first; } } bool is_set(socket_type descriptor) const { return !!__WSAFDIsSet(descriptor, const_cast(reinterpret_cast(fd_set_))); } operator fd_set*() { return reinterpret_cast(fd_set_); } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { for (u_int i = 0; i < fd_set_->fd_count; ++i) operations.perform_operations(fd_set_->fd_array[i], ops); } private: // This structure is defined to be compatible with the Windows API fd_set // structure, but without being dependent on the value of FD_SETSIZE. We use // the "struct hack" to allow the number of descriptors to be varied at // runtime. struct win_fd_set { u_int fd_count; SOCKET fd_array[1]; }; // Increase the fd_set_ capacity to at least the specified number of elements. 
void reserve(u_int n) { if (n <= capacity_) return; u_int new_capacity = capacity_ + capacity_ / 2; if (new_capacity < n) new_capacity = n; win_fd_set* new_fd_set = static_cast(::operator new( sizeof(win_fd_set) - sizeof(SOCKET) + sizeof(SOCKET) * (new_capacity))); new_fd_set->fd_count = fd_set_->fd_count; for (u_int i = 0; i < fd_set_->fd_count; ++i) new_fd_set->fd_array[i] = fd_set_->fd_array[i]; ::operator delete(fd_set_); fd_set_ = new_fd_set; capacity_ = new_capacity; } win_fd_set* fd_set_; u_int capacity_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // ASIO_DETAIL_WIN_FD_SET_ADAPTER_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_send_op.hpp000644 000164 177776 00000010512 15107057155 025235 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_send_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_send_op_base : public reactor_op { public: reactive_socket_send_op_base(socket_type socket, socket_ops::state_type state, const ConstBufferSequence& buffers, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_send_op_base::do_perform, complete_func), socket_(socket), state_(state), buffers_(buffers), flags_(flags) { } static status do_perform(reactor_op* base) { reactive_socket_send_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); status result = socket_ops::non_blocking_send(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->ec_, o->bytes_transferred_) ? 
done : not_done; if (result == done) if ((o->state_ & socket_ops::stream_oriented) != 0) if (o->bytes_transferred_ < bufs.total_size()) result = done_and_exhausted; ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_send", o->ec_, o->bytes_transferred_)); return result; } private: socket_type socket_; socket_ops::state_type state_; ConstBufferSequence buffers_; socket_base::message_flags flags_; }; template class reactive_socket_send_op : public reactive_socket_send_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_send_op); reactive_socket_send_op(socket_type socket, socket_ops::state_type state, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) : reactive_socket_send_op_base(socket, state, buffers, flags, &reactive_socket_send_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_send_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. 
if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_SEND_OP_HPP galera-4-26.4.25/asio/asio/detail/buffer_sequence_adapter.hpp000644 000164 177776 00000027672 15107057155 025234 0ustar00jenkinsnogroup000000 000000 // // detail/buffer_sequence_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #define ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/array_fwd.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffer_sequence_adapter_base { #if defined(ASIO_WINDOWS_RUNTIME) public: // The maximum number of buffers to support in a single operation. enum { max_buffers = 1 }; protected: typedef Windows::Storage::Streams::IBuffer^ native_buffer_type; ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::mutable_buffer& buffer); ASIO_DECL static void init_native_buffer( native_buffer_type& buf, const asio::const_buffer& buffer); #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) public: // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 
64 : max_iov_len }; protected: typedef WSABUF native_buffer_type; static void init_native_buffer(WSABUF& buf, const asio::mutable_buffer& buffer) { buf.buf = static_cast(buffer.data()); buf.len = static_cast(buffer.size()); } static void init_native_buffer(WSABUF& buf, const asio::const_buffer& buffer) { buf.buf = const_cast(static_cast(buffer.data())); buf.len = static_cast(buffer.size()); } #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) public: // The maximum number of buffers to support in a single operation. enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len }; protected: typedef iovec native_buffer_type; static void init_iov_base(void*& base, void* addr) { base = addr; } template static void init_iov_base(T& base, void* addr) { base = static_cast(addr); } static void init_native_buffer(iovec& iov, const asio::mutable_buffer& buffer) { init_iov_base(iov.iov_base, buffer.data()); iov.iov_len = buffer.size(); } static void init_native_buffer(iovec& iov, const asio::const_buffer& buffer) { init_iov_base(iov.iov_base, const_cast(buffer.data())); iov.iov_len = buffer.size(); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) }; // Helper class to translate buffers into the native buffer representation. 
template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter(const Buffers& buffer_sequence) : count_(0), total_buffer_size_(0) { buffer_sequence_adapter::init( asio::buffer_sequence_begin(buffer_sequence), asio::buffer_sequence_end(buffer_sequence)); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return count_; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const Buffers& buffer_sequence) { return buffer_sequence_adapter::all_empty( asio::buffer_sequence_begin(buffer_sequence), asio::buffer_sequence_end(buffer_sequence)); } static void validate(const Buffers& buffer_sequence) { buffer_sequence_adapter::validate( asio::buffer_sequence_begin(buffer_sequence), asio::buffer_sequence_end(buffer_sequence)); } static Buffer first(const Buffers& buffer_sequence) { return buffer_sequence_adapter::first( asio::buffer_sequence_begin(buffer_sequence), asio::buffer_sequence_end(buffer_sequence)); } private: template void init(Iterator begin, Iterator end) { Iterator iter = begin; for (; iter != end && count_ < max_buffers; ++iter, ++count_) { Buffer buffer(*iter); init_native_buffer(buffers_[count_], buffer); total_buffer_size_ += buffer.size(); } } template static bool all_empty(Iterator begin, Iterator end) { Iterator iter = begin; std::size_t i = 0; for (; iter != end && i < max_buffers; ++iter, ++i) if (Buffer(*iter).size() > 0) return false; return true; } template static void validate(Iterator begin, Iterator end) { Iterator iter = begin; for (; iter != end; ++iter) { Buffer buffer(*iter); buffer.data(); } } template static Buffer first(Iterator begin, Iterator end) { Iterator iter = begin; for (; iter != end; ++iter) { Buffer buffer(*iter); if (buffer.size() != 0) return buffer; } return Buffer(); } native_buffer_type buffers_[max_buffers]; std::size_t count_; std::size_t 
total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::mutable_buffer& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = buffer_sequence.size(); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::mutable_buffer& buffer_sequence) { return buffer_sequence.size() == 0; } static void validate(const asio::mutable_buffer& buffer_sequence) { buffer_sequence.data(); } static Buffer first(const asio::mutable_buffer& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::const_buffer& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = buffer_sequence.size(); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::const_buffer& buffer_sequence) { return buffer_sequence.size() == 0; } static void validate(const asio::const_buffer& buffer_sequence) { buffer_sequence.data(); } static Buffer first(const asio::const_buffer& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; #if !defined(ASIO_NO_DEPRECATED) template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::mutable_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = 
buffer_sequence.size(); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::mutable_buffers_1& buffer_sequence) { return buffer_sequence.size() == 0; } static void validate(const asio::mutable_buffers_1& buffer_sequence) { buffer_sequence.data(); } static Buffer first(const asio::mutable_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; template class buffer_sequence_adapter : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const asio::const_buffers_1& buffer_sequence) { init_native_buffer(buffer_, Buffer(buffer_sequence)); total_buffer_size_ = buffer_sequence.size(); } native_buffer_type* buffers() { return &buffer_; } std::size_t count() const { return 1; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const asio::const_buffers_1& buffer_sequence) { return buffer_sequence.size() == 0; } static void validate(const asio::const_buffers_1& buffer_sequence) { buffer_sequence.data(); } static Buffer first(const asio::const_buffers_1& buffer_sequence) { return Buffer(buffer_sequence); } private: native_buffer_type buffer_; std::size_t total_buffer_size_; }; #endif // !defined(ASIO_NO_DEPRECATED) template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const boost::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size(); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } std::size_t total_size() const { return 
total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const boost::array& buffer_sequence) { return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0; } static void validate(const boost::array& buffer_sequence) { buffer_sequence[0].data(); buffer_sequence[1].data(); } static Buffer first(const boost::array& buffer_sequence) { return Buffer(buffer_sequence[0].size() != 0 ? buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #if defined(ASIO_HAS_STD_ARRAY) template class buffer_sequence_adapter > : buffer_sequence_adapter_base { public: explicit buffer_sequence_adapter( const std::array& buffer_sequence) { init_native_buffer(buffers_[0], Buffer(buffer_sequence[0])); init_native_buffer(buffers_[1], Buffer(buffer_sequence[1])); total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size(); } native_buffer_type* buffers() { return buffers_; } std::size_t count() const { return 2; } std::size_t total_size() const { return total_buffer_size_; } bool all_empty() const { return total_buffer_size_ == 0; } static bool all_empty(const std::array& buffer_sequence) { return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0; } static void validate(const std::array& buffer_sequence) { buffer_sequence[0].data(); buffer_sequence[1].data(); } static Buffer first(const std::array& buffer_sequence) { return Buffer(buffer_sequence[0].size() != 0 ? 
buffer_sequence[0] : buffer_sequence[1]); } private: native_buffer_type buffers_[2]; std::size_t total_buffer_size_; }; #endif // defined(ASIO_HAS_STD_ARRAY) } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/buffer_sequence_adapter.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP galera-4-26.4.25/asio/asio/detail/call_stack.hpp000644 000164 177776 00000005544 15107057155 022465 0ustar00jenkinsnogroup000000 000000 // // detail/call_stack.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CALL_STACK_HPP #define ASIO_DETAIL_CALL_STACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to determine whether or not the current thread is inside an // invocation of io_context::run() for a specified io_context object. template class call_stack { public: // Context class automatically pushes the key/value pair on to the stack. class context : private noncopyable { public: // Push the key on to the stack. explicit context(Key* k) : key_(k), next_(call_stack::top_) { value_ = reinterpret_cast(this); call_stack::top_ = this; } // Push the key/value pair on to the stack. context(Key* k, Value& v) : key_(k), value_(&v), next_(call_stack::top_) { call_stack::top_ = this; } // Pop the key/value pair from the stack. ~context() { call_stack::top_ = next_; } // Find the next context with the same key. 
Value* next_by_key() const { context* elem = next_; while (elem) { if (elem->key_ == key_) return elem->value_; elem = elem->next_; } return 0; } private: friend class call_stack; // The key associated with the context. Key* key_; // The value associated with the context. Value* value_; // The next element in the stack. context* next_; }; friend class context; // Determine whether the specified owner is on the stack. Returns address of // key if present, 0 otherwise. static Value* contains(Key* k) { context* elem = top_; while (elem) { if (elem->key_ == k) return elem->value_; elem = elem->next_; } return 0; } // Obtain the value at the top of the stack. static Value* top() { context* elem = top_; return elem ? elem->value_ : 0; } private: // The top of the stack of calls for the current thread. static tss_ptr top_; }; template tss_ptr::context> call_stack::top_; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CALL_STACK_HPP galera-4-26.4.25/asio/asio/detail/winrt_async_op.hpp000644 000164 177776 00000002554 15107057155 023421 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_async_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_ASYNC_OP_HPP #define ASIO_DETAIL_WINRT_ASYNC_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The result of the operation, to be passed to the completion handler. 
TResult result_; protected: winrt_async_op(func_type complete_func) : operation(complete_func), result_() { } }; template <> class winrt_async_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; protected: winrt_async_op(func_type complete_func) : operation(complete_func) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WINRT_ASYNC_OP_HPP galera-4-26.4.25/asio/asio/detail/win_global.hpp000644 000164 177776 00000003241 15107057155 022472 0ustar00jenkinsnogroup000000 000000 // // detail/win_global.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_GLOBAL_HPP #define ASIO_DETAIL_WIN_GLOBAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/static_mutex.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct win_global_impl { // Destructor automatically cleans up the global. 
~win_global_impl() { delete ptr_; } static win_global_impl instance_; static static_mutex mutex_; T* ptr_; static tss_ptr tss_ptr_; }; template win_global_impl win_global_impl::instance_ = { 0 }; template static_mutex win_global_impl::mutex_ = ASIO_STATIC_MUTEX_INIT; template tss_ptr win_global_impl::tss_ptr_; template T& win_global() { if (static_cast(win_global_impl::tss_ptr_) == 0) { win_global_impl::mutex_.init(); static_mutex::scoped_lock lock(win_global_impl::mutex_); if (win_global_impl::instance_.ptr_ == 0) win_global_impl::instance_.ptr_ = new T; win_global_impl::tss_ptr_ = win_global_impl::instance_.ptr_; } return *win_global_impl::tss_ptr_; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_WIN_GLOBAL_HPP galera-4-26.4.25/asio/asio/detail/win_event.hpp000644 000164 177776 00000006475 15107057155 022367 0ustar00jenkinsnogroup000000 000000 // // detail/win_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_EVENT_HPP #define ASIO_DETAIL_WIN_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_event : private noncopyable { public: // Constructor. ASIO_DECL win_event(); // Destructor. ASIO_DECL ~win_event(); // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. 
template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; ::SetEvent(events_[0]); } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) ::SetEvent(events_[1]); } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); ::SetEvent(events_[1]); return true; } return false; } // Reset the event. template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; ::ResetEvent(events_[0]); state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); while ((state_ & 1) == 0) { state_ += 2; lock.unlock(); #if defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjectsEx(2, events_, false, INFINITE, false); #else // defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjects(2, events_, false, INFINITE); #endif // defined(ASIO_WINDOWS_APP) lock.lock(); state_ -= 2; } } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock& lock, long usec) { ASIO_ASSERT(lock.locked()); if ((state_ & 1) == 0) { state_ += 2; lock.unlock(); DWORD msec = usec > 0 ? (usec < 1000 ? 
1 : usec / 1000) : 0; #if defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjectsEx(2, events_, false, msec, false); #else // defined(ASIO_WINDOWS_APP) ::WaitForMultipleObjects(2, events_, false, msec); #endif // defined(ASIO_WINDOWS_APP) lock.lock(); state_ -= 2; } return (state_ & 1) != 0; } private: HANDLE events_[2]; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_event.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_EVENT_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_operation.hpp000644 000164 177776 00000003771 15107057155 024254 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_operation.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #define ASIO_DETAIL_WIN_IOCP_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/handler_tracking.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_iocp_io_context; // Base class for all operations. A function pointer is used instead of virtual // functions to avoid the associated overhead. 
class win_iocp_operation : public OVERLAPPED ASIO_ALSO_INHERIT_TRACKED_HANDLER { public: typedef win_iocp_operation operation_type; void complete(void* owner, const asio::error_code& ec, std::size_t bytes_transferred) { func_(owner, this, ec, bytes_transferred); } void destroy() { func_(0, this, asio::error_code(), 0); } protected: typedef void (*func_type)( void*, win_iocp_operation*, const asio::error_code&, std::size_t); win_iocp_operation(func_type func) : next_(0), func_(func) { reset(); } // Prevents deletion through this type. ~win_iocp_operation() { } void reset() { Internal = 0; InternalHigh = 0; Offset = 0; OffsetHigh = 0; hEvent = 0; ready_ = 0; } private: friend class op_queue_access; friend class win_iocp_io_context; win_iocp_operation* next_; func_type func_; long ready_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OPERATION_HPP galera-4-26.4.25/asio/asio/detail/operation.hpp000644 000164 177776 00000001554 15107057155 022362 0ustar00jenkinsnogroup000000 000000 // // detail/operation.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_OPERATION_HPP #define ASIO_DETAIL_OPERATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_operation.hpp" #else # include "asio/detail/scheduler_operation.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_HAS_IOCP) typedef win_iocp_operation operation; #else typedef scheduler_operation operation; #endif } // namespace detail } // namespace asio #endif // ASIO_DETAIL_OPERATION_HPP galera-4-26.4.25/asio/asio/detail/resolve_query_op.hpp000644 000164 177776 00000011163 15107057155 023761 0ustar00jenkinsnogroup000000 000000 // // detail/resolve_query_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVE_QUERY_OP_HPP #define ASIO_DETAIL_RESOLVE_QUERY_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/resolve_op.hpp" #include "asio/detail/socket_ops.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class resolve_query_op : public resolve_op { public: ASIO_DEFINE_HANDLER_PTR(resolve_query_op); typedef asio::ip::basic_resolver_query query_type; typedef asio::ip::basic_resolver_results results_type; #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif resolve_query_op(socket_ops::weak_cancel_token_type cancel_token, const query_type& query, scheduler_impl& sched, Handler& handler, const IoExecutor& io_ex) : resolve_op(&resolve_query_op::do_complete), cancel_token_(cancel_token), query_(query), scheduler_(sched), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex), addrinfo_(0) { handler_work::start(handler_, io_executor_); } ~resolve_query_op() { if (addrinfo_) socket_ops::freeaddrinfo(addrinfo_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the operation object. 
resolve_query_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; if (owner && owner != &o->scheduler_) { // The operation is being run on the worker io_context. Time to perform // the resolver operation. // Perform the blocking host resolution operation. socket_ops::background_getaddrinfo(o->cancel_token_, o->query_.host_name().c_str(), o->query_.service_name().c_str(), o->query_.hints(), &o->addrinfo_, o->ec_); // Pass operation back to main io_context for completion. o->scheduler_.post_deferred_completion(o); p.v = p.p = 0; } else { // The operation has been returned to the main io_context. The completion // handler is ready to be delivered. // Take ownership of the operation's outstanding work. handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated // before the upcall is made. Even if we're not about to make an upcall, // a sub-object of the handler may be the true owner of the memory // associated with the handler. Consequently, a local copy of the handler // is required to ensure that any owning sub-object remains valid until // after we have deallocated the memory here. 
detail::binder2 handler(o->handler_, o->ec_, results_type()); p.h = asio::detail::addressof(handler.handler_); if (o->addrinfo_) { handler.arg2_ = results_type::create(o->addrinfo_, o->query_.host_name(), o->query_.service_name()); } p.reset(); if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, "...")); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } } private: socket_ops::weak_cancel_token_type cancel_token_; query_type query_; scheduler_impl& scheduler_; Handler handler_; IoExecutor io_executor_; asio::detail::addrinfo_type* addrinfo_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_RESOLVE_QUERY_OP_HPP galera-4-26.4.25/asio/asio/detail/winrt_resolver_service.hpp000644 000164 177776 00000013570 15107057155 025167 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_resolver_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #define ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/post.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_resolve_op.hpp" #include "asio/detail/winrt_utils.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class winrt_resolver_service : public execution_context_service_base > { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the asynchronous operation that the operation has been // cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The query type. typedef asio::ip::basic_resolver_query query_type; // The results type. typedef asio::ip::basic_resolver_results results_type; // Constructor. winrt_resolver_service(execution_context& context) : execution_context_service_base< winrt_resolver_service >(context), scheduler_(use_service(context)), async_manager_(use_service(context)) { } // Destructor. ~winrt_resolver_service() { } // Destroy all user-defined handler objects owned by the service. void shutdown() { } // Perform any fork-related housekeeping. void notify_fork(execution_context::fork_event) { } // Construct a new resolver implementation. 
void construct(implementation_type&) { } // Move-construct a new resolver implementation. void move_construct(implementation_type&, implementation_type&) { } // Move-assign from another resolver implementation. void move_assign(implementation_type&, winrt_resolver_service&, implementation_type&) { } // Destroy a resolver implementation. void destroy(implementation_type&) { } // Cancel pending asynchronous operations. void cancel(implementation_type&) { } // Resolve a query to a list of entries. results_type resolve(implementation_type&, const query_type& query, asio::error_code& ec) { try { using namespace Windows::Networking::Sockets; auto endpoint_pairs = async_manager_.sync( DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), ec); if (ec) return results_type(); return results_type::create( endpoint_pairs, query.hints(), query.host_name(), query.service_name()); } catch (Platform::Exception^ e) { ec = asio::error_code(e->HResult, asio::system_category()); return results_type(); } } // Asynchronously resolve a query to a list of entries. template void async_resolve(implementation_type& impl, const query_type& query, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef winrt_resolve_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(query, handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "resolver", &impl, 0, "async_resolve")); (void)impl; try { using namespace Windows::Networking::Sockets; async_manager_.async(DatagramSocket::GetEndpointPairsAsync( winrt_utils::host_name(query.host_name()), winrt_utils::string(query.service_name())), p.p); p.v = p.p = 0; } catch (Platform::Exception^ e) { p.p->ec_ = asio::error_code( e->HResult, asio::system_category()); scheduler_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; } } // Resolve an endpoint to a list of entries. results_type resolve(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return results_type(); } // Asynchronously resolve an endpoint to a list of entries. template void async_resolve(implementation_type&, const endpoint_type&, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const results_type results; asio::post(io_ex, detail::bind_handler(handler, ec, results)); } private: // The scheduler implementation used for delivering completions. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; winrt_async_manager& async_manager_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_RESOLVER_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_socket_recv_op.hpp000644 000164 177776 00000007451 15107057155 025260 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_socket_recv_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #define ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_socket_recv_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_socket_recv_op); win_iocp_socket_recv_op(socket_ops::state_type state, socket_ops::weak_cancel_token_type cancel_token, const MutableBufferSequence& buffers, Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_socket_recv_op::do_complete), state_(state), cancel_token_(cancel_token), buffers_(buffers), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& result_ec, std::size_t bytes_transferred) { asio::error_code ec(result_ec); // Take ownership of the operation object. win_iocp_socket_recv_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) // Check whether buffers are still valid. 
if (owner) { buffer_sequence_adapter::validate(o->buffers_); } #endif // defined(ASIO_ENABLE_BUFFER_DEBUGGING) socket_ops::complete_iocp_recv(o->state_, o->cancel_token_, buffer_sequence_adapter::all_empty(o->buffers_), ec, bytes_transferred); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: socket_ops::state_type state_; socket_ops::weak_cancel_token_type cancel_token_; MutableBufferSequence buffers_; Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_SOCKET_RECV_OP_HPP galera-4-26.4.25/asio/asio/detail/date_time_fwd.hpp000644 000164 177776 00000001400 15107057155 023143 0ustar00jenkinsnogroup000000 000000 // // detail/date_time_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_DATE_TIME_FWD_HPP #define ASIO_DETAIL_DATE_TIME_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" namespace boost { namespace date_time { template class base_time; } // namespace date_time namespace posix_time { class ptime; } // namespace posix_time } // namespace boost #endif // ASIO_DETAIL_DATE_TIME_FWD_HPP galera-4-26.4.25/asio/asio/detail/null_reactor.hpp000644 000164 177776 00000002645 15107057155 023055 0ustar00jenkinsnogroup000000 000000 // // detail/null_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_REACTOR_HPP #define ASIO_DETAIL_NULL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/scheduler_operation.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_reactor : public execution_context_service_base { public: // Constructor. null_reactor(asio::execution_context& ctx) : execution_context_service_base(ctx) { } // Destructor. ~null_reactor() { } // Destroy all user-defined handler objects owned by the service. void shutdown() { } // No-op because should never be called. void run(long /*usec*/, op_queue& /*ops*/) { } // No-op. 
void interrupt() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/posix_fd_set_adapter.hpp000644 000164 177776 00000005462 15107057155 024552 0ustar00jenkinsnogroup000000 000000 // // detail/posix_fd_set_adapter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #define ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) \ && !defined(__CYGWIN__) \ && !defined(ASIO_WINDOWS_RUNTIME) #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Adapts the FD_SET type to meet the Descriptor_Set concept's requirements. class posix_fd_set_adapter : noncopyable { public: posix_fd_set_adapter() : max_descriptor_(invalid_socket) { using namespace std; // Needed for memset on Solaris. FD_ZERO(&fd_set_); } void reset() { using namespace std; // Needed for memset on Solaris. 
FD_ZERO(&fd_set_); } bool set(socket_type descriptor) { if (descriptor < (socket_type)FD_SETSIZE) { if (max_descriptor_ == invalid_socket || descriptor > max_descriptor_) max_descriptor_ = descriptor; FD_SET(descriptor, &fd_set_); return true; } return false; } void set(reactor_op_queue& operations, op_queue& ops) { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (!set(op_iter->first)) { asio::error_code ec(error::fd_set_failure); operations.cancel_operations(op_iter, ops, ec); } } } bool is_set(socket_type descriptor) const { return FD_ISSET(descriptor, &fd_set_) != 0; } operator fd_set*() { return &fd_set_; } socket_type max_descriptor() const { return max_descriptor_; } void perform(reactor_op_queue& operations, op_queue& ops) const { reactor_op_queue::iterator i = operations.begin(); while (i != operations.end()) { reactor_op_queue::iterator op_iter = i++; if (is_set(op_iter->first)) operations.perform_operations(op_iter, ops); } } private: mutable fd_set fd_set_; socket_type max_descriptor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) // && !defined(__CYGWIN__) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_POSIX_FD_SET_ADAPTER_HPP galera-4-26.4.25/asio/asio/detail/posix_static_mutex.hpp000644 000164 177776 00000002521 15107057155 024310 0ustar00jenkinsnogroup000000 000000 // // detail/posix_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #define ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct posix_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { // Nothing to do. } // Lock the mutex. void lock() { (void)::pthread_mutex_lock(&mutex_); // Ignore EINVAL. } // Unlock the mutex. void unlock() { (void)::pthread_mutex_unlock(&mutex_); // Ignore EINVAL. } ::pthread_mutex_t mutex_; }; #define ASIO_POSIX_STATIC_MUTEX_INIT { PTHREAD_MUTEX_INITIALIZER } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_STATIC_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/scheduler_thread_info.hpp000644 000164 177776 00000001746 15107057155 024705 0ustar00jenkinsnogroup000000 000000 // // detail/scheduler_thread_info.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP #define ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/op_queue.hpp" #include "asio/detail/thread_info_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class scheduler; class scheduler_operation; struct scheduler_thread_info : public thread_info_base { op_queue private_op_queue; long private_outstanding_work; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SCHEDULER_THREAD_INFO_HPP galera-4-26.4.25/asio/asio/detail/consuming_buffers.hpp000644 000164 177776 00000024152 15107057155 024077 0ustar00jenkinsnogroup000000 000000 // // detail/consuming_buffers.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONSUMING_BUFFERS_HPP #define ASIO_DETAIL_CONSUMING_BUFFERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/buffer.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper template to determine the maximum number of prepared buffers. 
template struct prepared_buffers_max { enum { value = buffer_sequence_adapter_base::max_buffers }; }; template struct prepared_buffers_max > { enum { value = N }; }; #if defined(ASIO_HAS_STD_ARRAY) template struct prepared_buffers_max > { enum { value = N }; }; #endif // defined(ASIO_HAS_STD_ARRAY) // A buffer sequence used to represent a subsequence of the buffers. template struct prepared_buffers { typedef Buffer value_type; typedef const Buffer* const_iterator; enum { max_buffers = MaxBuffers < 16 ? MaxBuffers : 16 }; prepared_buffers() : count(0) {} const_iterator begin() const { return elems; } const_iterator end() const { return elems + count; } Buffer elems[max_buffers]; std::size_t count; }; // A proxy for a sub-range in a list of buffers. template class consuming_buffers { public: typedef prepared_buffers::value> prepared_buffers_type; // Construct to represent the entire list of buffers. explicit consuming_buffers(const Buffers& buffers) : buffers_(buffers), total_consumed_(0), next_elem_(0), next_elem_offset_(0) { using asio::buffer_size; total_size_ = buffer_size(buffers); } // Determine if we are at the end of the buffers. bool empty() const { return total_consumed_ >= total_size_; } // Get the buffer for a single transfer, with a size. prepared_buffers_type prepare(std::size_t max_size) { prepared_buffers_type result; Buffer_Iterator next = asio::buffer_sequence_begin(buffers_); Buffer_Iterator end = asio::buffer_sequence_end(buffers_); std::advance(next, next_elem_); std::size_t elem_offset = next_elem_offset_; while (next != end && max_size > 0 && (result.count) < result.max_buffers) { Buffer next_buf = Buffer(*next) + elem_offset; result.elems[result.count] = asio::buffer(next_buf, max_size); max_size -= result.elems[result.count].size(); elem_offset = 0; if (result.elems[result.count].size() > 0) ++result.count; ++next; } return result; } // Consume the specified number of bytes from the buffers. 
void consume(std::size_t size) { total_consumed_ += size; Buffer_Iterator next = asio::buffer_sequence_begin(buffers_); Buffer_Iterator end = asio::buffer_sequence_end(buffers_); std::advance(next, next_elem_); while (next != end && size > 0) { Buffer next_buf = Buffer(*next) + next_elem_offset_; if (size < next_buf.size()) { next_elem_offset_ += size; size = 0; } else { size -= next_buf.size(); next_elem_offset_ = 0; ++next_elem_; ++next; } } } // Get the total number of bytes consumed from the buffers. std::size_t total_consumed() const { return total_consumed_; } private: Buffers buffers_; std::size_t total_size_; std::size_t total_consumed_; std::size_t next_elem_; std::size_t next_elem_offset_; }; // Base class of all consuming_buffers specialisations for single buffers. template class consuming_single_buffer { public: // Construct to represent the entire list of buffers. template explicit consuming_single_buffer(const Buffer1& buffer) : buffer_(buffer), total_consumed_(0) { } // Determine if we are at the end of the buffers. bool empty() const { return total_consumed_ >= buffer_.size(); } // Get the buffer for a single transfer, with a size. Buffer prepare(std::size_t max_size) { return asio::buffer(buffer_ + total_consumed_, max_size); } // Consume the specified number of bytes from the buffers. void consume(std::size_t size) { total_consumed_ += size; } // Get the total number of bytes consumed from the buffers. 
std::size_t total_consumed() const { return total_consumed_; } private: Buffer buffer_; std::size_t total_consumed_; }; template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const mutable_buffer& buffer) : consuming_single_buffer(buffer) { } }; template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const mutable_buffer& buffer) : consuming_single_buffer(buffer) { } }; template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const const_buffer& buffer) : consuming_single_buffer(buffer) { } }; #if !defined(ASIO_NO_DEPRECATED) template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const mutable_buffers_1& buffer) : consuming_single_buffer(buffer) { } }; template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const mutable_buffers_1& buffer) : consuming_single_buffer(buffer) { } }; template <> class consuming_buffers : public consuming_single_buffer { public: explicit consuming_buffers(const const_buffers_1& buffer) : consuming_single_buffer(buffer) { } }; #endif // !defined(ASIO_NO_DEPRECATED) template class consuming_buffers, typename boost::array::const_iterator> { public: // Construct to represent the entire list of buffers. explicit consuming_buffers(const boost::array& buffers) : buffers_(buffers), total_consumed_(0) { } // Determine if we are at the end of the buffers. bool empty() const { return total_consumed_ >= Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size(); } // Get the buffer for a single transfer, with a size. 
boost::array prepare(std::size_t max_size) { boost::array result = {{ Buffer(buffers_[0]), Buffer(buffers_[1]) }}; std::size_t buffer0_size = result[0].size(); result[0] = asio::buffer(result[0] + total_consumed_, max_size); result[1] = asio::buffer( result[1] + (total_consumed_ < buffer0_size ? 0 : total_consumed_ - buffer0_size), max_size - result[0].size()); return result; } // Consume the specified number of bytes from the buffers. void consume(std::size_t size) { total_consumed_ += size; } // Get the total number of bytes consumed from the buffers. std::size_t total_consumed() const { return total_consumed_; } private: boost::array buffers_; std::size_t total_consumed_; }; #if defined(ASIO_HAS_STD_ARRAY) template class consuming_buffers, typename std::array::const_iterator> { public: // Construct to represent the entire list of buffers. explicit consuming_buffers(const std::array& buffers) : buffers_(buffers), total_consumed_(0) { } // Determine if we are at the end of the buffers. bool empty() const { return total_consumed_ >= Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size(); } // Get the buffer for a single transfer, with a size. std::array prepare(std::size_t max_size) { std::array result = {{ Buffer(buffers_[0]), Buffer(buffers_[1]) }}; std::size_t buffer0_size = result[0].size(); result[0] = asio::buffer(result[0] + total_consumed_, max_size); result[1] = asio::buffer( result[1] + (total_consumed_ < buffer0_size ? 0 : total_consumed_ - buffer0_size), max_size - result[0].size()); return result; } // Consume the specified number of bytes from the buffers. void consume(std::size_t size) { total_consumed_ += size; } // Get the total number of bytes consumed from the buffers. 
std::size_t total_consumed() const { return total_consumed_; } private: std::array buffers_; std::size_t total_consumed_; }; #endif // defined(ASIO_HAS_STD_ARRAY) // Specialisation for null_buffers to ensure that the null_buffers type is // always passed through to the underlying read or write operation. template class consuming_buffers : public asio::null_buffers { public: consuming_buffers(const null_buffers&) { // No-op. } bool empty() { return false; } null_buffers prepare(std::size_t) { return null_buffers(); } void consume(std::size_t) { // No-op. } std::size_t total_consumed() const { return 0; } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CONSUMING_BUFFERS_HPP galera-4-26.4.25/asio/asio/detail/win_tss_ptr.hpp000644 000164 177776 00000003125 15107057155 022731 0ustar00jenkinsnogroup000000 000000 // // detail/win_tss_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_TSS_PTR_HPP #define ASIO_DETAIL_WIN_TSS_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) #include "asio/detail/noncopyable.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper function to create thread-specific storage. ASIO_DECL DWORD win_tss_ptr_create(); template class win_tss_ptr : private noncopyable { public: // Constructor. win_tss_ptr() : tss_key_(win_tss_ptr_create()) { } // Destructor. ~win_tss_ptr() { ::TlsFree(tss_key_); } // Get the value. operator T*() const { return static_cast(::TlsGetValue(tss_key_)); } // Set the value. 
void operator=(T* value) { ::TlsSetValue(tss_key_, value); } private: // Thread-specific storage to allow unlocked access to determine whether a // thread is a member of the pool. DWORD tss_key_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/win_tss_ptr.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS) #endif // ASIO_DETAIL_WIN_TSS_PTR_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_recvfrom_op.hpp000644 000164 177776 00000011150 15107057155 026126 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_recvfrom_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class reactive_socket_recvfrom_op_base : public reactor_op { public: reactive_socket_recvfrom_op_base(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, socket_base::message_flags flags, func_type complete_func) : reactor_op(&reactive_socket_recvfrom_op_base::do_perform, complete_func), socket_(socket), protocol_type_(protocol_type), buffers_(buffers), sender_endpoint_(endpoint), flags_(flags) { } static status do_perform(reactor_op* 
base) { reactive_socket_recvfrom_op_base* o( static_cast(base)); buffer_sequence_adapter bufs(o->buffers_); std::size_t addr_len = o->sender_endpoint_.capacity(); status result = socket_ops::non_blocking_recvfrom(o->socket_, bufs.buffers(), bufs.count(), o->flags_, o->sender_endpoint_.data(), &addr_len, o->ec_, o->bytes_transferred_) ? done : not_done; if (result && !o->ec_) o->sender_endpoint_.resize(addr_len); ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_recvfrom", o->ec_, o->bytes_transferred_)); return result; } private: socket_type socket_; int protocol_type_; MutableBufferSequence buffers_; Endpoint& sender_endpoint_; socket_base::message_flags flags_; }; template class reactive_socket_recvfrom_op : public reactive_socket_recvfrom_op_base { public: ASIO_DEFINE_HANDLER_PTR(reactive_socket_recvfrom_op); reactive_socket_recvfrom_op(socket_type socket, int protocol_type, const MutableBufferSequence& buffers, Endpoint& endpoint, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) : reactive_socket_recvfrom_op_base( socket, protocol_type, buffers, endpoint, flags, &reactive_socket_recvfrom_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void do_complete(void* owner, operation* base, const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/) { // Take ownership of the handler object. reactive_socket_recvfrom_op* o( static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. 
Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, o->ec_, o->bytes_transferred_); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_REACTIVE_SOCKET_RECVFROM_OP_HPP galera-4-26.4.25/asio/asio/detail/handler_work.hpp000644 000164 177776 00000006155 15107057155 023043 0ustar00jenkinsnogroup000000 000000 // // detail/handler_work.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HANDLER_WORK_HPP #define ASIO_DETAIL_HANDLER_WORK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/associated_executor.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A helper class template to allow completion handlers to be dispatched // through either the new executors framework or the old invocaton hook. The // primary template uses the new executors framework. 
template ::type> class handler_work { public: explicit handler_work(Handler& handler) ASIO_NOEXCEPT : io_executor_(), executor_(asio::get_associated_executor(handler, io_executor_)) { } handler_work(Handler& handler, const IoExecutor& io_ex) ASIO_NOEXCEPT : io_executor_(io_ex), executor_(asio::get_associated_executor(handler, io_executor_)) { } static void start(Handler& handler) ASIO_NOEXCEPT { HandlerExecutor ex(asio::get_associated_executor(handler)); ex.on_work_started(); } static void start(Handler& handler, const IoExecutor& io_ex) ASIO_NOEXCEPT { HandlerExecutor ex(asio::get_associated_executor(handler, io_ex)); ex.on_work_started(); io_ex.on_work_started(); } ~handler_work() { io_executor_.on_work_finished(); executor_.on_work_finished(); } template void complete(Function& function, Handler& handler) { executor_.dispatch(ASIO_MOVE_CAST(Function)(function), asio::get_associated_allocator(handler)); } private: // Disallow copying and assignment. handler_work(const handler_work&); handler_work& operator=(const handler_work&); IoExecutor io_executor_; HandlerExecutor executor_; }; // This specialisation dispatches a handler through the old invocation hook. // The specialisation is not strictly required for correctness, as the // system_executor will dispatch through the hook anyway. However, by doing // this we avoid an extra copy of the handler. template class handler_work { public: explicit handler_work(Handler&) ASIO_NOEXCEPT {} static void start(Handler&) ASIO_NOEXCEPT {} ~handler_work() {} template void complete(Function& function, Handler& handler) { asio_handler_invoke_helpers::invoke(function, handler); } private: // Disallow copying and assignment. 
handler_work(const handler_work&); handler_work& operator=(const handler_work&); }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HANDLER_WORK_HPP galera-4-26.4.25/asio/asio/detail/chrono_time_traits.hpp000644 000164 177776 00000010452 15107057155 024253 0ustar00jenkinsnogroup000000 000000 // // detail/chrono_time_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #define ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/cstdint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper template to compute the greatest common divisor. template struct gcd { enum { value = gcd::value }; }; template struct gcd { enum { value = v1 }; }; // Adapts std::chrono clocks for use with a deadline timer. template struct chrono_time_traits { // The clock type. typedef Clock clock_type; // The duration type of the clock. typedef typename clock_type::duration duration_type; // The time point type of the clock. typedef typename clock_type::time_point time_type; // The period of the clock. typedef typename duration_type::period period_type; // Get the current time. static time_type now() { return clock_type::now(); } // Add a duration to a time. static time_type add(const time_type& t, const duration_type& d) { const time_type epoch; if (t >= epoch) { if ((time_type::max)() - t < d) return (time_type::max)(); } else // t < epoch { if (-(t - (time_type::min)()) > d) return (time_type::min)(); } return t + d; } // Subtract one time from another. 
static duration_type subtract(const time_type& t1, const time_type& t2) { const time_type epoch; if (t1 >= epoch) { if (t2 >= epoch) { return t1 - t2; } else if (t2 == (time_type::min)()) { return (duration_type::max)(); } else if ((time_type::max)() - t1 < epoch - t2) { return (duration_type::max)(); } else { return t1 - t2; } } else // t1 < epoch { if (t2 < epoch) { return t1 - t2; } else if (t1 == (time_type::min)()) { return (duration_type::min)(); } else if ((time_type::max)() - t2 < epoch - t1) { return (duration_type::min)(); } else { return -(t2 - t1); } } } // Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { return t1 < t2; } // Implement just enough of the posix_time::time_duration interface to supply // what the timer_queue requires. class posix_time_duration { public: explicit posix_time_duration(const duration_type& d) : d_(d) { } int64_t ticks() const { return d_.count(); } int64_t total_seconds() const { return duration_cast<1, 1>(); } int64_t total_milliseconds() const { return duration_cast<1, 1000>(); } int64_t total_microseconds() const { return duration_cast<1, 1000000>(); } private: template int64_t duration_cast() const { const int64_t num1 = period_type::num / gcd::value; const int64_t num2 = Num / gcd::value; const int64_t den1 = period_type::den / gcd::value; const int64_t den2 = Den / gcd::value; const int64_t num = num1 * den2; const int64_t den = num2 * den1; if (num == 1 && den == 1) return ticks(); else if (num != 1 && den == 1) return ticks() * num; else if (num == 1 && period_type::den != 1) return ticks() / den; else return ticks() * num / den; } duration_type d_; }; // Convert to POSIX duration type. 
static posix_time_duration to_posix_duration(const duration_type& d) { return posix_time_duration(WaitTraits::to_wait_duration(d)); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP galera-4-26.4.25/asio/asio/detail/select_interrupter.hpp000644 000164 177776 00000002435 15107057155 024303 0ustar00jenkinsnogroup000000 000000 // // detail/select_interrupter.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_INTERRUPTER_HPP #define ASIO_DETAIL_SELECT_INTERRUPTER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS_RUNTIME) #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) # include "asio/detail/socket_select_interrupter.hpp" #elif defined(ASIO_HAS_EVENTFD) # include "asio/detail/eventfd_select_interrupter.hpp" #else # include "asio/detail/pipe_select_interrupter.hpp" #endif namespace asio { namespace detail { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) || defined(__SYMBIAN32__) typedef socket_select_interrupter select_interrupter; #elif defined(ASIO_HAS_EVENTFD) typedef eventfd_select_interrupter select_interrupter; #else typedef pipe_select_interrupter select_interrupter; #endif } // namespace detail } // namespace asio #endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_SELECT_INTERRUPTER_HPP galera-4-26.4.25/asio/asio/detail/gcc_arm_fenced_block.hpp000644 000164 177776 00000004155 15107057155 024433 0ustar00jenkinsnogroup000000 000000 // // detail/gcc_arm_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #define ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(__GNUC__) && defined(__arm__) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class gcc_arm_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit gcc_arm_fenced_block(half_t) { } // Constructor for a full fenced block. explicit gcc_arm_fenced_block(full_t) { barrier(); } // Destructor. ~gcc_arm_fenced_block() { barrier(); } private: static void barrier() { #if defined(__ARM_ARCH_4__) \ || defined(__ARM_ARCH_4T__) \ || defined(__ARM_ARCH_5__) \ || defined(__ARM_ARCH_5E__) \ || defined(__ARM_ARCH_5T__) \ || defined(__ARM_ARCH_5TE__) \ || defined(__ARM_ARCH_5TEJ__) \ || defined(__ARM_ARCH_6__) \ || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6K__) \ || defined(__ARM_ARCH_6Z__) \ || defined(__ARM_ARCH_6ZK__) \ || defined(__ARM_ARCH_6T2__) # if defined(__thumb__) // This is just a placeholder and almost certainly not sufficient. __asm__ __volatile__ ("" : : : "memory"); # else // defined(__thumb__) int a = 0, b = 0; __asm__ __volatile__ ("swp %0, %1, [%2]" : "=&r"(a) : "r"(1), "r"(&b) : "memory", "cc"); # endif // defined(__thumb__) #else // ARMv7 and later. 
__asm__ __volatile__ ("dmb" : : : "memory"); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(__GNUC__) && defined(__arm__) #endif // ASIO_DETAIL_GCC_ARM_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/std_event.hpp000644 000164 177776 00000007011 15107057155 022347 0ustar00jenkinsnogroup000000 000000 // // detail/std_event.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STD_EVENT_HPP #define ASIO_DETAIL_STD_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class std_event : private noncopyable { public: // Constructor. std_event() : state_(0) { } // Destructor. ~std_event() { } // Signal the event. (Retained for backward compatibility.) template void signal(Lock& lock) { this->signal_all(lock); } // Signal all waiters. template void signal_all(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ |= 1; cond_.notify_all(); } // Unlock the mutex and signal one waiter. template void unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; bool have_waiters = (state_ > 1); lock.unlock(); if (have_waiters) cond_.notify_one(); } // If there's a waiter, unlock the mutex and signal it. template bool maybe_unlock_and_signal_one(Lock& lock) { ASIO_ASSERT(lock.locked()); state_ |= 1; if (state_ > 1) { lock.unlock(); cond_.notify_one(); return true; } return false; } // Reset the event. 
template void clear(Lock& lock) { ASIO_ASSERT(lock.locked()); (void)lock; state_ &= ~std::size_t(1); } // Wait for the event to become signalled. template void wait(Lock& lock) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); while ((state_ & 1) == 0) { waiter w(state_); cond_.wait(u_lock.unique_lock_); } } // Timed wait for the event to become signalled. template bool wait_for_usec(Lock& lock, long usec) { ASIO_ASSERT(lock.locked()); unique_lock_adapter u_lock(lock); if ((state_ & 1) == 0) { waiter w(state_); cond_.wait_for(u_lock.unique_lock_, std::chrono::microseconds(usec)); } return (state_ & 1) != 0; } private: // Helper class to temporarily adapt a scoped_lock into a unique_lock so that // it can be passed to std::condition_variable::wait(). struct unique_lock_adapter { template explicit unique_lock_adapter(Lock& lock) : unique_lock_(lock.mutex().mutex_, std::adopt_lock) { } ~unique_lock_adapter() { unique_lock_.release(); } std::unique_lock unique_lock_; }; // Helper to increment and decrement the state to track outstanding waiters. class waiter { public: explicit waiter(std::size_t& state) : state_(state) { state_ += 2; } ~waiter() { state_ -= 2; } private: std::size_t& state_; }; std::condition_variable cond_; std::size_t state_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) #endif // ASIO_DETAIL_STD_EVENT_HPP galera-4-26.4.25/asio/asio/detail/cstdint.hpp000644 000164 177776 00000002663 15107057155 022034 0ustar00jenkinsnogroup000000 000000 // // detail/cstdint.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CSTDINT_HPP #define ASIO_DETAIL_CSTDINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CSTDINT) # include #else // defined(ASIO_HAS_CSTDINT) # include #endif // defined(ASIO_HAS_CSTDINT) namespace asio { #if defined(ASIO_HAS_CSTDINT) using std::int16_t; using std::int_least16_t; using std::uint16_t; using std::uint_least16_t; using std::int32_t; using std::int_least32_t; using std::uint32_t; using std::uint_least32_t; using std::int64_t; using std::int_least64_t; using std::uint64_t; using std::uint_least64_t; using std::uintmax_t; #else // defined(ASIO_HAS_CSTDINT) using boost::int16_t; using boost::int_least16_t; using boost::uint16_t; using boost::uint_least16_t; using boost::int32_t; using boost::int_least32_t; using boost::uint32_t; using boost::uint_least32_t; using boost::int64_t; using boost::int_least64_t; using boost::uint64_t; using boost::uint_least64_t; using boost::uintmax_t; #endif // defined(ASIO_HAS_CSTDINT) } // namespace asio #endif // ASIO_DETAIL_CSTDINT_HPP galera-4-26.4.25/asio/asio/detail/config.hpp000644 000164 177776 00000155165 15107057155 021637 0ustar00jenkinsnogroup000000 000000 // // detail/config.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONFIG_HPP #define ASIO_DETAIL_CONFIG_HPP // boostify: non-boost code starts here #if !defined(ASIO_STANDALONE) # if !defined(ASIO_ENABLE_BOOST) # if (__cplusplus >= 201103) # define ASIO_STANDALONE 1 # elif defined(_MSC_VER) && defined(_MSVC_LANG) # if (_MSC_VER >= 1900) && (_MSVC_LANG >= 201103) # define ASIO_STANDALONE 1 # endif // (_MSC_VER >= 1900) && (_MSVC_LANG >= 201103) # endif // defined(_MSC_VER) && defined(_MSVC_LANG) # endif // !defined(ASIO_ENABLE_BOOST) #endif // !defined(ASIO_STANDALONE) // boostify: non-boost code ends here #if defined(ASIO_STANDALONE) # define ASIO_DISABLE_BOOST_ARRAY 1 # define ASIO_DISABLE_BOOST_ASSERT 1 # define ASIO_DISABLE_BOOST_BIND 1 # define ASIO_DISABLE_BOOST_CHRONO 1 # define ASIO_DISABLE_BOOST_DATE_TIME 1 # define ASIO_DISABLE_BOOST_LIMITS 1 # define ASIO_DISABLE_BOOST_REGEX 1 # define ASIO_DISABLE_BOOST_STATIC_CONSTANT 1 # define ASIO_DISABLE_BOOST_THROW_EXCEPTION 1 # define ASIO_DISABLE_BOOST_WORKAROUND 1 #else // defined(ASIO_STANDALONE) # include # include # define ASIO_HAS_BOOST_CONFIG 1 #endif // defined(ASIO_STANDALONE) // Default to a header-only implementation. The user must specifically request // separate compilation by defining either ASIO_SEPARATE_COMPILATION or // ASIO_DYN_LINK (as a DLL/shared library implies separate compilation). #if !defined(ASIO_HEADER_ONLY) # if !defined(ASIO_SEPARATE_COMPILATION) # if !defined(ASIO_DYN_LINK) # define ASIO_HEADER_ONLY 1 # endif // !defined(ASIO_DYN_LINK) # endif // !defined(ASIO_SEPARATE_COMPILATION) #endif // !defined(ASIO_HEADER_ONLY) #if defined(ASIO_HEADER_ONLY) # define ASIO_DECL inline #else // defined(ASIO_HEADER_ONLY) # if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) // We need to import/export our code only if the user has specifically asked // for it by defining ASIO_DYN_LINK. 
# if defined(ASIO_DYN_LINK) // Export if this is our own source, otherwise import. # if defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllexport) # else // defined(ASIO_SOURCE) # define ASIO_DECL __declspec(dllimport) # endif // defined(ASIO_SOURCE) # endif // defined(ASIO_DYN_LINK) # endif // defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CODEGEARC__) #endif // defined(ASIO_HEADER_ONLY) // If ASIO_DECL isn't defined yet define it now. #if !defined(ASIO_DECL) # define ASIO_DECL #endif // !defined(ASIO_DECL) // Microsoft Visual C++ detection. #if !defined(ASIO_MSVC) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) # define ASIO_MSVC BOOST_MSVC # elif defined(_MSC_VER) && (defined(__INTELLISENSE__) \ || (!defined(__MWERKS__) && !defined(__EDG_VERSION__))) # define ASIO_MSVC _MSC_VER # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_MSVC) #endif // !defined(ASIO_MSVC) // Clang / libc++ detection. #if defined(__clang__) # if (__cplusplus >= 201103) # if __has_include(<__config>) # include <__config> # if defined(_LIBCPP_VERSION) # define ASIO_HAS_CLANG_LIBCXX 1 # endif // defined(_LIBCPP_VERSION) # endif // __has_include(<__config>) # endif // (__cplusplus >= 201103) #endif // defined(__clang__) // Android platform detection. #if defined(__ANDROID__) # include #endif // defined(__ANDROID__) // Support move construction and assignment on compilers known to allow it. 
#if !defined(ASIO_HAS_MOVE) # if !defined(ASIO_DISABLE_MOVE) # if defined(__clang__) # if __has_feature(__cxx_rvalue_references__) # define ASIO_HAS_MOVE 1 # endif // __has_feature(__cxx_rvalue_references__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_MOVE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_MOVE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # if defined(__INTEL_CXX11_MODE__) # if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500) # define BOOST_ASIO_HAS_MOVE 1 # endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1500) # if defined(__ICL) && (__ICL >= 1500) # define BOOST_ASIO_HAS_MOVE 1 # endif // defined(__ICL) && (__ICL >= 1500) # endif // defined(__INTEL_CXX11_MODE__) # endif // !defined(ASIO_DISABLE_MOVE) #endif // !defined(ASIO_HAS_MOVE) // If ASIO_MOVE_CAST isn't defined, and move support is available, define // * ASIO_MOVE_ARG, // * ASIO_NONDEDUCED_MOVE_ARG, and // * ASIO_MOVE_CAST // to take advantage of rvalue references and perfect forwarding. #if defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) # define ASIO_MOVE_ARG(type) type&& # define ASIO_MOVE_ARG2(type1, type2) type1, type2&& # define ASIO_NONDEDUCED_MOVE_ARG(type) type& # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // defined(ASIO_HAS_MOVE) && !defined(ASIO_MOVE_CAST) // If ASIO_MOVE_CAST still isn't defined, default to a C++03-compatible // implementation. Note that older g++ and MSVC versions don't like it when you // pass a non-member function through a const reference, so for most compilers // we'll play it safe and stick with the old approach of passing the handler by // value. 
#if !defined(ASIO_MOVE_CAST) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) const type& # else // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # define ASIO_MOVE_ARG(type) type # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) const type& # else // (_MSC_VER >= 1400) # define ASIO_MOVE_ARG(type) type # endif // (_MSC_VER >= 1400) # else # define ASIO_MOVE_ARG(type) type # endif # define ASIO_NONDEDUCED_MOVE_ARG(type) const type& # define ASIO_MOVE_CAST(type) static_cast # define ASIO_MOVE_CAST2(type1, type2) static_cast #endif // !defined(ASIO_MOVE_CAST) // Support variadic templates on compilers known to allow it. #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # if !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) # if defined(__clang__) # if __has_feature(__cxx_variadic_templates__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // __has_feature(__cxx_variadic_templates__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_HAS_VARIADIC_TEMPLATES 1 # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_VARIADIC_TEMPLATES) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) // Support deleted functions on compilers known to allow it. 
#if !defined(ASIO_DELETED) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_DELETED = delete # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(__clang__) # if __has_feature(__cxx_deleted_functions__) # define ASIO_DELETED = delete # endif // __has_feature(__cxx_deleted_functions__) # endif // defined(__clang__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_DELETED = delete # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # if !defined(ASIO_DELETED) # define ASIO_DELETED # endif // !defined(ASIO_DELETED) #endif // !defined(ASIO_DELETED) // Support constexpr on compilers known to allow it. #if !defined(ASIO_HAS_CONSTEXPR) # if !defined(ASIO_DISABLE_CONSTEXPR) # if defined(__clang__) # if __has_feature(__cxx_constexpr__) # define ASIO_HAS_CONSTEXPR 1 # endif // __has_feature(__cxx_constexr__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CONSTEXPR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_HAS_CONSTEXPR 1 # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CONSTEXPR) #endif // !defined(ASIO_HAS_CONSTEXPR) #if !defined(ASIO_CONSTEXPR) # if defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR constexpr # else // defined(ASIO_HAS_CONSTEXPR) # define ASIO_CONSTEXPR # endif // defined(ASIO_HAS_CONSTEXPR) #endif // !defined(ASIO_CONSTEXPR) // Support noexcept on compilers known to allow it. 
#if !defined(ASIO_NOEXCEPT) # if !defined(ASIO_DISABLE_NOEXCEPT) # if defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 105300) # define ASIO_NOEXCEPT BOOST_NOEXCEPT # define ASIO_NOEXCEPT_OR_NOTHROW BOOST_NOEXCEPT_OR_NOTHROW # elif defined(__clang__) # if __has_feature(__cxx_noexcept__) # define ASIO_NOEXCEPT noexcept(true) # define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true) # endif // __has_feature(__cxx_noexcept__) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_NOEXCEPT noexcept(true) # define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_NOEXCEPT noexcept(true) # define ASIO_NOEXCEPT_OR_NOTHROW noexcept(true) # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_NOEXCEPT) # if !defined(ASIO_NOEXCEPT) # define ASIO_NOEXCEPT # endif // !defined(ASIO_NOEXCEPT) # if !defined(ASIO_NOEXCEPT_OR_NOTHROW) # define ASIO_NOEXCEPT_OR_NOTHROW throw() # endif // !defined(ASIO_NOEXCEPT_OR_NOTHROW) #endif // !defined(ASIO_NOEXCEPT) // Support automatic type deduction on compilers known to support it. 
#if !defined(ASIO_HAS_DECLTYPE) # if !defined(ASIO_DISABLE_DECLTYPE) # if defined(__clang__) # if __has_feature(__cxx_decltype__) # define ASIO_HAS_DECLTYPE 1 # endif // __has_feature(__cxx_decltype__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_DECLTYPE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1800) # define ASIO_HAS_DECLTYPE 1 # endif // (_MSC_VER >= 1800) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_DECLTYPE) #endif // !defined(ASIO_HAS_DECLTYPE) // Support alias templates on compilers known to allow it. #if !defined(ASIO_HAS_ALIAS_TEMPLATES) # if !defined(ASIO_DISABLE_ALIAS_TEMPLATES) # if defined(__clang__) # if __has_feature(__cxx_alias_templates__) # define ASIO_HAS_ALIAS_TEMPLATES 1 # endif // __has_feature(__cxx_alias_templates__) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_ALIAS_TEMPLATES 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_HAS_ALIAS_TEMPLATES 1 # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_ALIAS_TEMPLATES) #endif // !defined(ASIO_HAS_ALIAS_TEMPLATES) // Standard library support for system errors. 
#if !defined(ASIO_HAS_STD_SYSTEM_ERROR) # if !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_SYSTEM_ERROR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SYSTEM_ERROR) #endif // !defined(ASIO_HAS_STD_SYSTEM_ERROR) // Compliant C++11 compilers put noexcept specifiers on error_category members. #if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # if defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 105300) # define ASIO_ERROR_CATEGORY_NOEXCEPT BOOST_NOEXCEPT # elif defined(__clang__) # if __has_feature(__cxx_noexcept__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // __has_feature(__cxx_noexcept__) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1900) # define ASIO_ERROR_CATEGORY_NOEXCEPT noexcept(true) # endif // (_MSC_VER >= 1900) # endif // defined(ASIO_MSVC) # if !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) # define ASIO_ERROR_CATEGORY_NOEXCEPT # endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) #endif // !defined(ASIO_ERROR_CATEGORY_NOEXCEPT) // Standard library support for 
arrays. #if !defined(ASIO_HAS_STD_ARRAY) # if !defined(ASIO_DISABLE_STD_ARRAY) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ARRAY 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ARRAY 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ARRAY 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_ARRAY 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ARRAY) #endif // !defined(ASIO_HAS_STD_ARRAY) // Standard library support for shared_ptr and weak_ptr. #if !defined(ASIO_HAS_STD_SHARED_PTR) # if !defined(ASIO_DISABLE_STD_SHARED_PTR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_SHARED_PTR 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_SHARED_PTR 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_SHARED_PTR) #endif // !defined(ASIO_HAS_STD_SHARED_PTR) // Standard library support for allocator_arg_t. 
#if !defined(ASIO_HAS_STD_ALLOCATOR_ARG) # if !defined(ASIO_DISABLE_STD_ALLOCATOR_ARG) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ALLOCATOR_ARG 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_ALLOCATOR_ARG 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ALLOCATOR_ARG 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1600) # define ASIO_HAS_STD_ALLOCATOR_ARG 1 # endif // (_MSC_VER >= 1600) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ALLOCATOR_ARG) #endif // !defined(ASIO_HAS_STD_ALLOCATOR_ARG) // Standard library support for atomic operations. #if !defined(ASIO_HAS_STD_ATOMIC) # if !defined(ASIO_DISABLE_STD_ATOMIC) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ATOMIC 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_ATOMIC 1 # endif // __has_include() # elif defined(__apple_build_version__) && defined(_LIBCPP_VERSION) # if (__clang_major__ >= 10) # if __has_include() # define ASIO_HAS_STD_ATOMIC 1 # endif // __has_include() # endif // (__clang_major__ >= 10) # endif /// defined(__apple_build_version__) && defined(_LIBCPP_VERSION) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ATOMIC 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ATOMIC 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // 
!defined(ASIO_DISABLE_STD_ATOMIC) #endif // !defined(ASIO_HAS_STD_ATOMIC) // Standard library support for chrono. Some standard libraries (such as the // libstdc++ shipped with gcc 4.6) provide monotonic_clock as per early C++0x // drafts, rather than the eventually standardised name of steady_clock. #if !defined(ASIO_HAS_STD_CHRONO) # if !defined(ASIO_DISABLE_STD_CHRONO) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_CHRONO 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_CHRONO 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_CHRONO 1 # if ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # define ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK 1 # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ == 6)) # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_CHRONO 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_CHRONO) #endif // !defined(ASIO_HAS_STD_CHRONO) // Boost support for chrono. #if !defined(ASIO_HAS_BOOST_CHRONO) # if !defined(ASIO_DISABLE_BOOST_CHRONO) # if defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 104700) # define ASIO_HAS_BOOST_CHRONO 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && (BOOST_VERSION >= 104700) # endif // !defined(ASIO_DISABLE_BOOST_CHRONO) #endif // !defined(ASIO_HAS_BOOST_CHRONO) // Some form of chrono library is available. 
#if !defined(ASIO_HAS_CHRONO) # if defined(ASIO_HAS_STD_CHRONO) \ || defined(ASIO_HAS_BOOST_CHRONO) # define ASIO_HAS_CHRONO 1 # endif // defined(ASIO_HAS_STD_CHRONO) // || defined(ASIO_HAS_BOOST_CHRONO) #endif // !defined(ASIO_HAS_CHRONO) // Boost support for the DateTime library. #if !defined(ASIO_HAS_BOOST_DATE_TIME) # if !defined(ASIO_DISABLE_BOOST_DATE_TIME) # define ASIO_HAS_BOOST_DATE_TIME 1 # endif // !defined(ASIO_DISABLE_BOOST_DATE_TIME) #endif // !defined(ASIO_HAS_BOOST_DATE_TIME) // Standard library support for addressof. #if !defined(ASIO_HAS_STD_ADDRESSOF) # if !defined(ASIO_DISABLE_STD_ADDRESSOF) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_ADDRESSOF 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_ADDRESSOF 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_ADDRESSOF) #endif // !defined(ASIO_HAS_STD_ADDRESSOF) // Standard library support for the function class. 
#if !defined(ASIO_HAS_STD_FUNCTION) # if !defined(ASIO_DISABLE_STD_FUNCTION) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_FUNCTION 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_STD_FUNCTION 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_FUNCTION 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_FUNCTION 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_FUNCTION) #endif // !defined(ASIO_HAS_STD_FUNCTION) // Standard library support for type traits. #if !defined(ASIO_HAS_STD_TYPE_TRAITS) # if !defined(ASIO_DISABLE_STD_TYPE_TRAITS) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_TYPE_TRAITS 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_TYPE_TRAITS 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_TYPE_TRAITS) #endif // !defined(ASIO_HAS_STD_TYPE_TRAITS) // Standard library support for the nullptr_t type. 
#if !defined(ASIO_HAS_NULLPTR) # if !defined(ASIO_DISABLE_NULLPTR) # if defined(__clang__) # if __has_feature(__cxx_nullptr__) # define ASIO_HAS_NULLPTR 1 # endif // __has_feature(__cxx_rvalue_references__) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_NULLPTR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 6)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_NULLPTR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_NULLPTR) #endif // !defined(ASIO_HAS_NULLPTR) // Standard library support for the C++11 allocator additions. #if !defined(ASIO_HAS_CXX11_ALLOCATORS) # if !defined(ASIO_DISABLE_CXX11_ALLOCATORS) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_CXX11_ALLOCATORS 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_CXX11_ALLOCATORS 1 # endif // (__cplusplus >= 201103) # elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CXX11_ALLOCATORS 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1800) # define ASIO_HAS_CXX11_ALLOCATORS 1 # endif // (_MSC_VER >= 1800) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CXX11_ALLOCATORS) #endif // !defined(ASIO_HAS_CXX11_ALLOCATORS) // Standard library support for the cstdint header. 
#if !defined(ASIO_HAS_CSTDINT) # if !defined(ASIO_DISABLE_CSTDINT) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_CSTDINT 1 # elif (__cplusplus >= 201103) # define ASIO_HAS_CSTDINT 1 # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_CSTDINT 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_CSTDINT 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CSTDINT) #endif // !defined(ASIO_HAS_CSTDINT) // Standard library support for the thread class. #if !defined(ASIO_HAS_STD_THREAD) # if !defined(ASIO_DISABLE_STD_THREAD) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_THREAD 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_THREAD 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_THREAD 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_THREAD 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_THREAD) #endif // !defined(ASIO_HAS_STD_THREAD) // Standard library support for the mutex and condition variable classes. 
#if !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) # if !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_MUTEX_AND_CONDVAR 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_MUTEX_AND_CONDVAR) #endif // !defined(ASIO_HAS_STD_MUTEX_AND_CONDVAR) // Standard library support for the call_once function. #if !defined(ASIO_HAS_STD_CALL_ONCE) # if !defined(ASIO_DISABLE_STD_CALL_ONCE) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_CALL_ONCE 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_CALL_ONCE 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_CALL_ONCE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_CALL_ONCE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_CALL_ONCE) #endif // !defined(ASIO_HAS_STD_CALL_ONCE) // Standard library support for futures. 
#if !defined(ASIO_HAS_STD_FUTURE) # if !defined(ASIO_DISABLE_STD_FUTURE) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # define ASIO_HAS_STD_FUTURE 1 # elif (__cplusplus >= 201103) # if __has_include() # define ASIO_HAS_STD_FUTURE 1 # endif // __has_include() # endif // (__cplusplus >= 201103) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_FUTURE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_FUTURE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_FUTURE) #endif // !defined(ASIO_HAS_STD_FUTURE) // Standard library support for std::string_view. #if !defined(ASIO_HAS_STD_STRING_VIEW) # if !defined(ASIO_DISABLE_STD_STRING_VIEW) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # if (__cplusplus >= 201402) # if __has_include() # define ASIO_HAS_STD_STRING_VIEW 1 # endif // __has_include() # endif // (__cplusplus >= 201402) # else // defined(ASIO_HAS_CLANG_LIBCXX) # if (__cplusplus >= 201703) # if __has_include() # define ASIO_HAS_STD_STRING_VIEW 1 # endif // __has_include() # endif // (__cplusplus >= 201703) # endif // defined(ASIO_HAS_CLANG_LIBCXX) # elif defined(__GNUC__) # if (__GNUC__ >= 7) # if (__cplusplus >= 201703) # define ASIO_HAS_STD_STRING_VIEW 1 # endif // (__cplusplus >= 201703) # endif // (__GNUC__ >= 7) # elif defined(ASIO_MSVC) # if (_MSC_VER >= 1910 && _MSVC_LANG >= 201703) # define ASIO_HAS_STD_STRING_VIEW 1 # endif // (_MSC_VER >= 1910 && _MSVC_LANG >= 201703) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_STRING_VIEW) #endif // !defined(ASIO_HAS_STD_STRING_VIEW) // Standard library support for std::experimental::string_view. 
#if !defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) # if !defined(ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) # if defined(__clang__) # if defined(ASIO_HAS_CLANG_LIBCXX) # if (_LIBCPP_VERSION < 7000) # if (__cplusplus >= 201402) # if __has_include() # define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1 # endif // __has_include() # endif // (__cplusplus >= 201402) # endif // (_LIBCPP_VERSION < 7000) # else // defined(ASIO_HAS_CLANG_LIBCXX) # if (__cplusplus >= 201402) # if __has_include() # define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1 # endif // __has_include() # endif // (__cplusplus >= 201402) # endif // // defined(ASIO_HAS_CLANG_LIBCXX) # endif // defined(__clang__) # if defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)) || (__GNUC__ > 4) # if (__cplusplus >= 201402) # define ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW 1 # endif // (__cplusplus >= 201402) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)) || (__GNUC__ > 4) # endif // defined(__GNUC__) # endif // !defined(ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) #endif // !defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) // Standard library has a string_view that we can use. #if !defined(ASIO_HAS_STRING_VIEW) # if !defined(ASIO_DISABLE_STRING_VIEW) # if defined(ASIO_HAS_STD_STRING_VIEW) # define ASIO_HAS_STRING_VIEW 1 # elif defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) # define ASIO_HAS_STRING_VIEW 1 # endif // defined(ASIO_HAS_STD_EXPERIMENTAL_STRING_VIEW) # endif // !defined(ASIO_DISABLE_STRING_VIEW) #endif // !defined(ASIO_HAS_STRING_VIEW) // Standard library support for iostream move construction and assignment. 
#if !defined(ASIO_HAS_STD_IOSTREAM_MOVE) # if !defined(ASIO_DISABLE_STD_IOSTREAM_MOVE) # if defined(__GNUC__) # if (__GNUC__ > 4) # if defined(__GXX_EXPERIMENTAL_CXX0X__) # define ASIO_HAS_STD_IOSTREAM_MOVE 1 # endif // defined(__GXX_EXPERIMENTAL_CXX0X__) # endif // (__GNUC__ > 4) # endif // defined(__GNUC__) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1700) # define ASIO_HAS_STD_IOSTREAM_MOVE 1 # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_IOSTREAM_MOVE) #endif // !defined(ASIO_HAS_STD_IOSTREAM_MOVE) // Standard library has invoke_result (which supersedes result_of). #if !defined(ASIO_HAS_STD_INVOKE_RESULT) # if !defined(ASIO_DISABLE_STD_INVOKE_RESULT) # if defined(ASIO_MSVC) # if (_MSC_VER >= 1911 && _MSVC_LANG >= 201703) # define ASIO_HAS_STD_INVOKE_RESULT 1 # endif // (_MSC_VER >= 1911 && _MSVC_LANG >= 201703) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_STD_INVOKE_RESULT) #endif // !defined(ASIO_HAS_STD_INVOKE_RESULT) // Windows App target. Windows but with a limited API. #if !defined(ASIO_WINDOWS_APP) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603) # include # if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \ && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # define ASIO_WINDOWS_APP 1 # endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0603) #endif // !defined(ASIO_WINDOWS_APP) // Legacy WinRT target. Windows App is preferred. 
#if !defined(ASIO_WINDOWS_RUNTIME) # if !defined(ASIO_WINDOWS_APP) # if defined(__cplusplus_winrt) # include # if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) \ && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # define ASIO_WINDOWS_RUNTIME 1 # endif // WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) // && !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) # endif // defined(__cplusplus_winrt) # endif // !defined(ASIO_WINDOWS_APP) #endif // !defined(ASIO_WINDOWS_RUNTIME) // Windows target. Excludes WinRT but includes Windows App targets. #if !defined(ASIO_WINDOWS) # if !defined(ASIO_WINDOWS_RUNTIME) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # define ASIO_WINDOWS 1 # elif defined(WIN32) || defined(_WIN32) || defined(__WIN32__) # define ASIO_WINDOWS 1 # elif defined(ASIO_WINDOWS_APP) # define ASIO_WINDOWS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_WINDOWS) # endif // !defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_WINDOWS) // Windows: target OS version. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) || defined(__BORLANDC__) # pragma message( \ "Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. For example:\n"\ "- add -D_WIN32_WINNT=0x0601 to the compiler command line; or\n"\ "- add _WIN32_WINNT=0x0601 to your project's Preprocessor Definitions.\n"\ "Assuming _WIN32_WINNT=0x0601 (i.e. Windows 7 target).") # else // defined(_MSC_VER) || defined(__BORLANDC__) # warning Please define _WIN32_WINNT or _WIN32_WINDOWS appropriately. # warning For example, add -D_WIN32_WINNT=0x0601 to the compiler command line. # warning Assuming _WIN32_WINNT=0x0601 (i.e. Windows 7 target). 
# endif // defined(_MSC_VER) || defined(__BORLANDC__) # define _WIN32_WINNT 0x0601 # endif // !defined(_WIN32_WINNT) && !defined(_WIN32_WINDOWS) # if defined(_MSC_VER) # if defined(_WIN32) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(_WIN32) && !defined(WIN32) # endif // defined(_MSC_VER) # if defined(__BORLANDC__) # if defined(__WIN32__) && !defined(WIN32) # if !defined(_WINSOCK2API_) # define WIN32 // Needed for correct types in winsock2.h # else // !defined(_WINSOCK2API_) # error Please define the macro WIN32 in your compiler options # endif // !defined(_WINSOCK2API_) # endif // defined(__WIN32__) && !defined(WIN32) # endif // defined(__BORLANDC__) # if defined(__CYGWIN__) # if !defined(__USE_W32_SOCKETS) # error You must add -D__USE_W32_SOCKETS to your compiler options. # endif // !defined(__USE_W32_SOCKETS) # endif // defined(__CYGWIN__) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: minimise header inclusion. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) # if !defined(WIN32_LEAN_AND_MEAN) # define WIN32_LEAN_AND_MEAN # endif // !defined(WIN32_LEAN_AND_MEAN) # endif // !defined(ASIO_NO_WIN32_LEAN_AND_MEAN) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: suppress definition of "min" and "max" macros. #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(ASIO_NO_NOMINMAX) # if !defined(NOMINMAX) # define NOMINMAX 1 # endif // !defined(NOMINMAX) # endif // !defined(ASIO_NO_NOMINMAX) #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Windows: IO Completion Ports. 
#if !defined(ASIO_HAS_IOCP) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # if !defined(ASIO_DISABLE_IOCP) # define ASIO_HAS_IOCP 1 # endif // !defined(ASIO_DISABLE_IOCP) # endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0400) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #endif // !defined(ASIO_HAS_IOCP) // On POSIX (and POSIX-like) platforms we need to include unistd.h in order to // get access to the various platform feature macros, e.g. to be able to test // for threads support. #if !defined(ASIO_HAS_UNISTD_H) # if !defined(ASIO_HAS_BOOST_CONFIG) # if defined(unix) \ || defined(__unix) \ || defined(_XOPEN_SOURCE) \ || defined(_POSIX_SOURCE) \ || (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) \ || defined(__linux__) \ || defined(__HAIKU__) # define ASIO_HAS_UNISTD_H 1 # endif # endif // !defined(ASIO_HAS_BOOST_CONFIG) #endif // !defined(ASIO_HAS_UNISTD_H) #if defined(ASIO_HAS_UNISTD_H) # include #endif // defined(ASIO_HAS_UNISTD_H) // Linux: epoll, eventfd and timerfd. 
#if defined(__linux__) # include # if !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_DISABLE_EPOLL) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # define ASIO_HAS_EPOLL 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,45) # endif // !defined(ASIO_DISABLE_EPOLL) # endif // !defined(ASIO_HAS_EPOLL) # if !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_DISABLE_EVENTFD) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # define ASIO_HAS_EVENTFD 1 # endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) # endif // !defined(ASIO_DISABLE_EVENTFD) # endif // !defined(ASIO_HAS_EVENTFD) # if !defined(ASIO_HAS_TIMERFD) # if defined(ASIO_HAS_EPOLL) # if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # define ASIO_HAS_TIMERFD 1 # endif // (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 8) # endif // defined(ASIO_HAS_EPOLL) # endif // !defined(ASIO_HAS_TIMERFD) #endif // defined(__linux__) // Mac OS X, FreeBSD, NetBSD, OpenBSD: kqueue. #if (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) # if !defined(ASIO_HAS_KQUEUE) # if !defined(ASIO_DISABLE_KQUEUE) # define ASIO_HAS_KQUEUE 1 # endif // !defined(ASIO_DISABLE_KQUEUE) # endif // !defined(ASIO_HAS_KQUEUE) #endif // (defined(__MACH__) && defined(__APPLE__)) // || defined(__FreeBSD__) // || defined(__NetBSD__) // || defined(__OpenBSD__) // Solaris: /dev/poll. #if defined(__sun) # if !defined(ASIO_HAS_DEV_POLL) # if !defined(ASIO_DISABLE_DEV_POLL) # define ASIO_HAS_DEV_POLL 1 # endif // !defined(ASIO_DISABLE_DEV_POLL) # endif // !defined(ASIO_HAS_DEV_POLL) #endif // defined(__sun) // Serial ports. 
#if !defined(ASIO_HAS_SERIAL_PORT) # if defined(ASIO_HAS_IOCP) \ || !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # if !defined(__SYMBIAN32__) # if !defined(ASIO_DISABLE_SERIAL_PORT) # define ASIO_HAS_SERIAL_PORT 1 # endif // !defined(ASIO_DISABLE_SERIAL_PORT) # endif // !defined(__SYMBIAN32__) # endif // defined(ASIO_HAS_IOCP) // || !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) #endif // !defined(ASIO_HAS_SERIAL_PORT) // Windows: stream handles. #if !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_STREAM_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_STREAM_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // Windows: random access handles. #if !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_RANDOM_ACCESS_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // Windows: object handles. #if !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) # if !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # define ASIO_HAS_WINDOWS_OBJECT_HANDLE 1 # endif // !defined(UNDER_CE) && !defined(ASIO_WINDOWS_APP) # endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_WINDOWS_OBJECT_HANDLE) #endif // !defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // Windows: OVERLAPPED wrapper. 
#if !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) # if !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) # if defined(ASIO_HAS_IOCP) # define ASIO_HAS_WINDOWS_OVERLAPPED_PTR 1 # endif // defined(ASIO_HAS_IOCP) # endif // !defined(ASIO_DISABLE_WINDOWS_OVERLAPPED_PTR) #endif // !defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) // POSIX: stream-oriented file descriptors. #if !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_POSIX_STREAM_DESCRIPTOR 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_POSIX_STREAM_DESCRIPTOR) #endif // !defined(ASIO_HAS_POSIX_STREAM_DESCRIPTOR) // UNIX domain sockets. #if !defined(ASIO_HAS_LOCAL_SOCKETS) # if !defined(ASIO_DISABLE_LOCAL_SOCKETS) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_LOCAL_SOCKETS 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_LOCAL_SOCKETS) #endif // !defined(ASIO_HAS_LOCAL_SOCKETS) // Can use sigaction() instead of signal(). #if !defined(ASIO_HAS_SIGACTION) # if !defined(ASIO_DISABLE_SIGACTION) # if !defined(ASIO_WINDOWS) \ && !defined(ASIO_WINDOWS_RUNTIME) \ && !defined(__CYGWIN__) # define ASIO_HAS_SIGACTION 1 # endif // !defined(ASIO_WINDOWS) // && !defined(ASIO_WINDOWS_RUNTIME) // && !defined(__CYGWIN__) # endif // !defined(ASIO_DISABLE_SIGACTION) #endif // !defined(ASIO_HAS_SIGACTION) // Can use signal(). #if !defined(ASIO_HAS_SIGNAL) # if !defined(ASIO_DISABLE_SIGNAL) # if !defined(UNDER_CE) # define ASIO_HAS_SIGNAL 1 # endif // !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SIGNAL) #endif // !defined(ASIO_HAS_SIGNAL) // Can use getaddrinfo() and getnameinfo(). 
#if !defined(ASIO_HAS_GETADDRINFO) # if !defined(ASIO_DISABLE_GETADDRINFO) # if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0501) # define ASIO_HAS_GETADDRINFO 1 # elif defined(UNDER_CE) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(UNDER_CE) # elif defined(__MACH__) && defined(__APPLE__) # if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # if (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050) # define ASIO_HAS_GETADDRINFO 1 # endif // (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1050) # else // defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # else // defined(__MACH__) && defined(__APPLE__) # define ASIO_HAS_GETADDRINFO 1 # endif // defined(__MACH__) && defined(__APPLE__) # endif // !defined(ASIO_DISABLE_GETADDRINFO) #endif // !defined(ASIO_HAS_GETADDRINFO) // Whether standard iostreams are disabled. #if !defined(ASIO_NO_IOSTREAM) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_IOSTREAM) # define ASIO_NO_IOSTREAM 1 # endif // !defined(BOOST_NO_IOSTREAM) #endif // !defined(ASIO_NO_IOSTREAM) // Whether exception handling is disabled. #if !defined(ASIO_NO_EXCEPTIONS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_EXCEPTIONS) # define ASIO_NO_EXCEPTIONS 1 # endif // !defined(BOOST_NO_EXCEPTIONS) #endif // !defined(ASIO_NO_EXCEPTIONS) // Whether the typeid operator is supported. #if !defined(ASIO_NO_TYPEID) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_NO_TYPEID) # define ASIO_NO_TYPEID 1 # endif // !defined(BOOST_NO_TYPEID) #endif // !defined(ASIO_NO_TYPEID) // Threads. 
#if !defined(ASIO_HAS_THREADS) # if !defined(ASIO_DISABLE_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # define ASIO_HAS_THREADS 1 # elif defined(__GNUC__) && !defined(__MINGW32__) \ && !defined(linux) && !defined(__linux) && !defined(__linux__) # define ASIO_HAS_THREADS 1 # elif defined(_MT) || defined(__MT__) # define ASIO_HAS_THREADS 1 # elif defined(_REENTRANT) # define ASIO_HAS_THREADS 1 # elif defined(__APPLE__) # define ASIO_HAS_THREADS 1 # elif defined(__HAIKU__) # define ASIO_HAS_THREADS 1 # elif defined(_POSIX_THREADS) && (_POSIX_THREADS + 0 >= 0) # define ASIO_HAS_THREADS 1 # elif defined(_PTHREADS) # define ASIO_HAS_THREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_THREADS) # endif // !defined(ASIO_DISABLE_THREADS) #endif // !defined(ASIO_HAS_THREADS) // POSIX threads. #if !defined(ASIO_HAS_PTHREADS) # if defined(ASIO_HAS_THREADS) # if defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # define ASIO_HAS_PTHREADS 1 # elif defined(_POSIX_THREADS) && (_POSIX_THREADS + 0 >= 0) # define ASIO_HAS_PTHREADS 1 # elif defined(__HAIKU__) # define ASIO_HAS_PTHREADS 1 # endif // defined(ASIO_HAS_BOOST_CONFIG) && defined(BOOST_HAS_PTHREADS) # endif // defined(ASIO_HAS_THREADS) #endif // !defined(ASIO_HAS_PTHREADS) // Helper to prevent macro expansion. #define ASIO_PREVENT_MACRO_SUBSTITUTION // Helper to define in-class constants. #if !defined(ASIO_STATIC_CONSTANT) # if !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ BOOST_STATIC_CONSTANT(type, assignment) # else // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) # define ASIO_STATIC_CONSTANT(type, assignment) \ static const type assignment # endif // !defined(ASIO_DISABLE_BOOST_STATIC_CONSTANT) #endif // !defined(ASIO_STATIC_CONSTANT) // Boost array library. 
#if !defined(ASIO_HAS_BOOST_ARRAY) # if !defined(ASIO_DISABLE_BOOST_ARRAY) # define ASIO_HAS_BOOST_ARRAY 1 # endif // !defined(ASIO_DISABLE_BOOST_ARRAY) #endif // !defined(ASIO_HAS_BOOST_ARRAY) // Boost assert macro. #if !defined(ASIO_HAS_BOOST_ASSERT) # if !defined(ASIO_DISABLE_BOOST_ASSERT) # define ASIO_HAS_BOOST_ASSERT 1 # endif // !defined(ASIO_DISABLE_BOOST_ASSERT) #endif // !defined(ASIO_HAS_BOOST_ASSERT) // Boost limits header. #if !defined(ASIO_HAS_BOOST_LIMITS) # if !defined(ASIO_DISABLE_BOOST_LIMITS) # define ASIO_HAS_BOOST_LIMITS 1 # endif // !defined(ASIO_DISABLE_BOOST_LIMITS) #endif // !defined(ASIO_HAS_BOOST_LIMITS) // Boost throw_exception function. #if !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) # if !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) # define ASIO_HAS_BOOST_THROW_EXCEPTION 1 # endif // !defined(ASIO_DISABLE_BOOST_THROW_EXCEPTION) #endif // !defined(ASIO_HAS_BOOST_THROW_EXCEPTION) // Boost regex library. #if !defined(ASIO_HAS_BOOST_REGEX) # if !defined(ASIO_DISABLE_BOOST_REGEX) # define ASIO_HAS_BOOST_REGEX 1 # endif // !defined(ASIO_DISABLE_BOOST_REGEX) #endif // !defined(ASIO_HAS_BOOST_REGEX) // Boost bind function. #if !defined(ASIO_HAS_BOOST_BIND) # if !defined(ASIO_DISABLE_BOOST_BIND) # define ASIO_HAS_BOOST_BIND 1 # endif // !defined(ASIO_DISABLE_BOOST_BIND) #endif // !defined(ASIO_HAS_BOOST_BIND) // Boost's BOOST_WORKAROUND macro. #if !defined(ASIO_HAS_BOOST_WORKAROUND) # if !defined(ASIO_DISABLE_BOOST_WORKAROUND) # define ASIO_HAS_BOOST_WORKAROUND 1 # endif // !defined(ASIO_DISABLE_BOOST_WORKAROUND) #endif // !defined(ASIO_HAS_BOOST_WORKAROUND) // Microsoft Visual C++'s secure C runtime library. 
#if !defined(ASIO_HAS_SECURE_RTL) # if !defined(ASIO_DISABLE_SECURE_RTL) # if defined(ASIO_MSVC) \ && (ASIO_MSVC >= 1400) \ && !defined(UNDER_CE) # define ASIO_HAS_SECURE_RTL 1 # endif // defined(ASIO_MSVC) // && (ASIO_MSVC >= 1400) // && !defined(UNDER_CE) # endif // !defined(ASIO_DISABLE_SECURE_RTL) #endif // !defined(ASIO_HAS_SECURE_RTL) // Handler hooking. Disabled for ancient Borland C++ and gcc compilers. #if !defined(ASIO_HAS_HANDLER_HOOKS) # if !defined(ASIO_DISABLE_HANDLER_HOOKS) # if defined(__GNUC__) # if (__GNUC__ >= 3) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // (__GNUC__ >= 3) # elif !defined(__BORLANDC__) # define ASIO_HAS_HANDLER_HOOKS 1 # endif // !defined(__BORLANDC__) # endif // !defined(ASIO_DISABLE_HANDLER_HOOKS) #endif // !defined(ASIO_HAS_HANDLER_HOOKS) // Support for the __thread keyword extension. #if !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) # if defined(__linux__) # if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # if ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # if !defined(__INTEL_COMPILER) && !defined(__ICL) \ && !(defined(__clang__) && defined(__ANDROID__)) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __thread # elif defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # endif // defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1100) // && !(defined(__clang__) && defined(__ANDROID__)) # endif // ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3) # endif // defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) # endif // defined(__linux__) # if defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) # if (_MSC_VER >= 1700) # define ASIO_HAS_THREAD_KEYWORD_EXTENSION 1 # define ASIO_THREAD_KEYWORD __declspec(thread) # endif // (_MSC_VER >= 1700) # endif // defined(ASIO_MSVC) && defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(ASIO_DISABLE_THREAD_KEYWORD_EXTENSION) #if !defined(ASIO_THREAD_KEYWORD) # 
define ASIO_THREAD_KEYWORD __thread #endif // !defined(ASIO_THREAD_KEYWORD) // Support for POSIX ssize_t typedef. #if !defined(ASIO_DISABLE_SSIZE_T) # if defined(__linux__) \ || (defined(__MACH__) && defined(__APPLE__)) # define ASIO_HAS_SSIZE_T 1 # endif // defined(__linux__) // || (defined(__MACH__) && defined(__APPLE__)) #endif // !defined(ASIO_DISABLE_SSIZE_T) // Helper macros to manage transition away from error_code return values. #if defined(ASIO_NO_DEPRECATED) # define ASIO_SYNC_OP_VOID void # define ASIO_SYNC_OP_VOID_RETURN(e) return #else // defined(ASIO_NO_DEPRECATED) # define ASIO_SYNC_OP_VOID asio::error_code # define ASIO_SYNC_OP_VOID_RETURN(e) return e #endif // defined(ASIO_NO_DEPRECATED) // Newer gcc, clang need special treatment to suppress unused typedef warnings. #if defined(__clang__) # if defined(__apple_build_version__) # if (__clang_major__ >= 7) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // (__clang_major__ >= 7) # elif ((__clang_major__ == 3) && (__clang_minor__ >= 6)) \ || (__clang_major__ > 3) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // ((__clang_major__ == 3) && (__clang_minor__ >= 6)) // || (__clang_major__ > 3) #elif defined(__GNUC__) # if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) # define ASIO_UNUSED_TYPEDEF __attribute__((__unused__)) # endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4) #endif // defined(__GNUC__) #if !defined(ASIO_UNUSED_TYPEDEF) # define ASIO_UNUSED_TYPEDEF #endif // !defined(ASIO_UNUSED_TYPEDEF) // Some versions of gcc generate spurious warnings about unused variables. #if defined(__GNUC__) # if (__GNUC__ >= 4) # define ASIO_UNUSED_VARIABLE __attribute__((__unused__)) # endif // (__GNUC__ >= 4) #endif // defined(__GNUC__) #if !defined(ASIO_UNUSED_VARIABLE) # define ASIO_UNUSED_VARIABLE #endif // !defined(ASIO_UNUSED_VARIABLE) // Support co_await on compilers known to allow it. 
#if !defined(ASIO_HAS_CO_AWAIT) # if !defined(ASIO_DISABLE_CO_AWAIT) # if defined(ASIO_MSVC) # if (_MSC_FULL_VER >= 190023506) # if defined(_RESUMABLE_FUNCTIONS_SUPPORTED) # define ASIO_HAS_CO_AWAIT 1 # endif // defined(_RESUMABLE_FUNCTIONS_SUPPORTED) # endif // (_MSC_FULL_VER >= 190023506) # endif // defined(ASIO_MSVC) # endif // !defined(ASIO_DISABLE_CO_AWAIT) # if defined(__clang__) # if (__cplusplus >= 201703) && (__cpp_coroutines >= 201703) # if __has_include() # define ASIO_HAS_CO_AWAIT 1 # endif // __has_include() # endif // (__cplusplus >= 201703) && (__cpp_coroutines >= 201703) # endif // defined(__clang__) #endif // !defined(ASIO_HAS_CO_AWAIT) #endif // ASIO_DETAIL_CONFIG_HPP galera-4-26.4.25/asio/asio/detail/posix_global.hpp000644 000164 177776 00000003270 15107057155 023041 0ustar00jenkinsnogroup000000 000000 // // detail/posix_global.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_GLOBAL_HPP #define ASIO_DETAIL_POSIX_GLOBAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct posix_global_impl { // Helper function to perform initialisation. static void do_init() { instance_.static_ptr_ = instance_.ptr_ = new T; } // Destructor automatically cleans up the global. 
~posix_global_impl() { delete static_ptr_; } static ::pthread_once_t init_once_; static T* static_ptr_; static posix_global_impl instance_; T* ptr_; }; template ::pthread_once_t posix_global_impl::init_once_ = PTHREAD_ONCE_INIT; template T* posix_global_impl::static_ptr_ = 0; template posix_global_impl posix_global_impl::instance_; template T& posix_global() { int result = ::pthread_once( &posix_global_impl::init_once_, &posix_global_impl::do_init); if (result != 0) std::terminate(); return *posix_global_impl::instance_.ptr_; } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_GLOBAL_HPP galera-4-26.4.25/asio/asio/detail/buffer_resize_guard.hpp000644 000164 177776 00000002750 15107057155 024375 0ustar00jenkinsnogroup000000 000000 // // detail/buffer_resize_guard.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #define ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Helper class to manage buffer resizing in an exception safe way. template class buffer_resize_guard { public: // Constructor. buffer_resize_guard(Buffer& buffer) : buffer_(buffer), old_size_(buffer.size()) { } // Destructor rolls back the buffer resize unless commit was called. ~buffer_resize_guard() { if (old_size_ != (std::numeric_limits::max)()) { buffer_.resize(old_size_); } } // Commit the resize transaction. 
void commit() { old_size_ = (std::numeric_limits::max)(); } private: // The buffer being managed. Buffer& buffer_; // The size of the buffer at the time the guard was constructed. size_t old_size_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_overlapped_ptr.hpp000644 000164 177776 00000007562 15107057155 025304 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_overlapped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/io_context.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/io_object_executor.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/win_iocp_overlapped_op.hpp" #include "asio/detail/win_iocp_io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Wraps a handler to create an OVERLAPPED object for use with overlapped I/O. class win_iocp_overlapped_ptr : private noncopyable { public: // Construct an empty win_iocp_overlapped_ptr. win_iocp_overlapped_ptr() : ptr_(0), iocp_service_(0) { } // Construct an win_iocp_overlapped_ptr to contain the specified handler. 
template explicit win_iocp_overlapped_ptr(const Executor& ex, ASIO_MOVE_ARG(Handler) handler) : ptr_(0), iocp_service_(0) { this->reset(ex, ASIO_MOVE_CAST(Handler)(handler)); } // Destructor automatically frees the OVERLAPPED object unless released. ~win_iocp_overlapped_ptr() { reset(); } // Reset to empty. void reset() { if (ptr_) { ptr_->destroy(); ptr_ = 0; iocp_service_->work_finished(); iocp_service_ = 0; } } // Reset to contain the specified handler, freeing any current OVERLAPPED // object. template void reset(const Executor& ex, Handler handler) { const bool native = is_same::value; win_iocp_io_context* iocp_service = this->get_iocp_service(ex); typedef win_iocp_overlapped_op > op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_object_executor(ex, native)); ASIO_HANDLER_CREATION((ex.context(), *p.p, "iocp_service", iocp_service, 0, "overlapped")); iocp_service->work_started(); reset(); ptr_ = p.p; p.v = p.p = 0; iocp_service_ = iocp_service; } // Get the contained OVERLAPPED object. OVERLAPPED* get() { return ptr_; } // Get the contained OVERLAPPED object. const OVERLAPPED* get() const { return ptr_; } // Release ownership of the OVERLAPPED object. OVERLAPPED* release() { if (ptr_) iocp_service_->on_pending(ptr_); OVERLAPPED* tmp = ptr_; ptr_ = 0; iocp_service_ = 0; return tmp; } // Post completion notification for overlapped operation. Releases ownership. 
void complete(const asio::error_code& ec, std::size_t bytes_transferred) { if (ptr_) { iocp_service_->on_completion(ptr_, ec, static_cast(bytes_transferred)); ptr_ = 0; iocp_service_ = 0; } } private: template static win_iocp_io_context* get_iocp_service(const Executor& ex) { return &use_service(ex.context()); } static win_iocp_io_context* get_iocp_service( const io_context::executor_type& ex) { return &ex.context().impl_; } win_iocp_operation* ptr_; win_iocp_io_context* iocp_service_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_PTR_HPP galera-4-26.4.25/asio/asio/detail/conditionally_enabled_event.hpp000644 000164 177776 00000005317 15107057155 026106 0ustar00jenkinsnogroup000000 000000 // // detail/conditionally_enabled_event.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP #define ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/conditionally_enabled_mutex.hpp" #include "asio/detail/event.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/null_event.hpp" #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Mutex adapter used to conditionally enable or disable locking. class conditionally_enabled_event : private noncopyable { public: // Constructor. conditionally_enabled_event() { } // Destructor. ~conditionally_enabled_event() { } // Signal the event. (Retained for backward compatibility.) 
void signal(conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) event_.signal(lock); } // Signal all waiters. void signal_all(conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) event_.signal_all(lock); } // Unlock the mutex and signal one waiter. void unlock_and_signal_one( conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) event_.unlock_and_signal_one(lock); } // If there's a waiter, unlock the mutex and signal it. bool maybe_unlock_and_signal_one( conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) return event_.maybe_unlock_and_signal_one(lock); else return false; } // Reset the event. void clear(conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) event_.clear(lock); } // Wait for the event to become signalled. void wait(conditionally_enabled_mutex::scoped_lock& lock) { if (lock.mutex_.enabled_) event_.wait(lock); else null_event().wait(lock); } // Timed wait for the event to become signalled. bool wait_for_usec( conditionally_enabled_mutex::scoped_lock& lock, long usec) { if (lock.mutex_.enabled_) return event_.wait_for_usec(lock, usec); else return null_event().wait_for_usec(lock, usec); } private: asio::detail::event event_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP galera-4-26.4.25/asio/asio/detail/null_fenced_block.hpp000644 000164 177776 00000001711 15107057155 024005 0ustar00jenkinsnogroup000000 000000 // // detail/null_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #define ASIO_DETAIL_NULL_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class null_fenced_block : private noncopyable { public: enum half_or_full_t { half, full }; // Constructor. explicit null_fenced_block(half_or_full_t) { } // Destructor. ~null_fenced_block() { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_NULL_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/null_socket_service.hpp000644 000164 177776 00000037646 15107057155 024437 0ustar00jenkinsnogroup000000 000000 // // detail/null_socket_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #define ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/post.hpp" #include "asio/socket_base.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class null_socket_service : public execution_context_service_base > { public: // The protocol type. typedef Protocol protocol_type; // The endpoint type. typedef typename Protocol::endpoint endpoint_type; // The native type of a socket. 
typedef int native_handle_type; // The implementation type of the socket. struct implementation_type { }; // Constructor. null_socket_service(execution_context& context) : execution_context_service_base >(context) { } // Destroy all user-defined handler objects owned by the service. void shutdown() { } // Construct a new socket implementation. void construct(implementation_type&) { } // Move-construct a new socket implementation. void move_construct(implementation_type&, implementation_type&) { } // Move-assign from another socket implementation. void move_assign(implementation_type&, null_socket_service&, implementation_type&) { } // Move-construct a new socket implementation from another protocol type. template void converting_move_construct(implementation_type&, null_socket_service&, typename null_socket_service::implementation_type&) { } // Destroy a socket implementation. void destroy(implementation_type&) { } // Open a new socket implementation. asio::error_code open(implementation_type&, const protocol_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Assign a native socket to a socket implementation. asio::error_code assign(implementation_type&, const protocol_type&, const native_handle_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is open. bool is_open(const implementation_type&) const { return false; } // Destroy a socket implementation. asio::error_code close(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Release ownership of the socket. native_handle_type release(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Get the native socket representation. native_handle_type native_handle(implementation_type&) { return 0; } // Cancel all operations associated with the socket. 
asio::error_code cancel(implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. bool at_mark(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. std::size_t available(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Place the socket into the state where it will listen for new connections. asio::error_code listen(implementation_type&, int, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Perform an IO control command on the socket. template asio::error_code io_control(implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Disable sends or receives on the socket. asio::error_code shutdown(implementation_type&, socket_base::shutdown_type, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Bind the socket to the specified local endpoint. 
asio::error_code bind(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code set_option(implementation_type&, const Option&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Set a socket option. template asio::error_code get_option(const implementation_type&, Option&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return ec; } // Get the local endpoint. endpoint_type local_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Get the remote endpoint. endpoint_type remote_endpoint(const implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return endpoint_type(); } // Send the given data to the peer. template std::size_t send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(implementation_type&, const ConstBufferSequence&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. 
template void async_send(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive(implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive(implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Receive some data with associated flags. Returns the number of bytes // received. 
template std::size_t receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(implementation_type&, const MutableBufferSequence&, socket_base::message_flags, socket_base::message_flags&, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. template void async_receive_with_flags(implementation_type&, const null_buffers&, socket_base::message_flags, socket_base::message_flags&, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Send a datagram to the specified endpoint. Returns the number of bytes // sent. template std::size_t send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be sent without blocking. std::size_t send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. 
The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send_to(implementation_type&, const ConstBufferSequence&, const endpoint_type&, socket_base::message_flags, Handler& handler) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Start an asynchronous wait until data can be sent without blocking. template void async_send_to(implementation_type&, const null_buffers&, const endpoint_type&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Receive a datagram with the endpoint of the sender. Returns the number of // bytes received. template std::size_t receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Wait until data can be received without blocking. std::size_t receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received and // the sender_endpoint object must both be valid for the lifetime of the // asynchronous operation. template void async_receive_from(implementation_type&, const MutableBufferSequence&, endpoint_type&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Wait until data can be received without blocking. 
template void async_receive_from(implementation_type&, const null_buffers&, endpoint_type&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler( handler, ec, bytes_transferred)); } // Accept a new connection. template asio::error_code accept(implementation_type&, Socket&, endpoint_type*, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous accept. The peer and peer_endpoint objects // must be valid until the accept's handler is invoked. template void async_accept(implementation_type&, Socket&, endpoint_type*, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; asio::post(io_ex, detail::bind_handler(handler, ec)); } // Connect the socket to the specified endpoint. asio::error_code connect(implementation_type&, const endpoint_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Start an asynchronous connect. template void async_connect(implementation_type&, const endpoint_type&, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; asio::post(io_ex, detail::bind_handler(handler, ec)); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_NULL_SOCKET_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/select_reactor.hpp000644 000164 177776 00000020233 15107057155 023353 0ustar00jenkinsnogroup000000 000000 // // detail/select_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SELECT_REACTOR_HPP #define ASIO_DETAIL_SELECT_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) \ || (!defined(ASIO_HAS_DEV_POLL) \ && !defined(ASIO_HAS_EPOLL) \ && !defined(ASIO_HAS_KQUEUE) \ && !defined(ASIO_WINDOWS_RUNTIME)) #include #include "asio/detail/fd_set_adapter.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/reactor_op_queue.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/execution_context.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/thread.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class select_reactor : public execution_context_service_base { public: #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 3, max_ops = 4 }; #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) enum op_types { read_op = 0, write_op = 1, except_op = 2, max_select_ops = 3, connect_op = 1, max_ops = 3 }; #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Per-descriptor data. struct per_descriptor_data { }; // Constructor. ASIO_DECL select_reactor(asio::execution_context& ctx); // Destructor. ASIO_DECL ~select_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Recreate internal descriptors following a fork. 
ASIO_DECL void notify_fork( asio::execution_context::fork_event fork_ev); // Initialise the task, but only if the reactor is not in its own thread. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Post a reactor operation for immediate completion. void post_immediate_completion(reactor_op* op, bool is_continuation) { scheduler_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data&, reactor_op* op, bool is_continuation, bool); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. The reactor resources associated with // the descriptor must be released by calling cleanup_descriptor_data. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data&, bool closing); // Remove the descriptor's registration from the reactor. The reactor // resources associated with the descriptor must be released by calling // cleanup_descriptor_data. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data&); // Perform any post-deregistration cleanup tasks associated with the // descriptor data. 
ASIO_DECL void cleanup_descriptor_data(per_descriptor_data&); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source); // Run select once until interrupted or events are ready to be dispatched. ASIO_DECL void run(long usec, op_queue& ops); // Interrupt the select loop. ASIO_DECL void interrupt(); private: #if defined(ASIO_HAS_IOCP) // Run the select loop in the thread. ASIO_DECL void run_thread(); #endif // defined(ASIO_HAS_IOCP) // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the select call. ASIO_DECL timeval* get_timeout(long usec, timeval& tv); // Cancel all operations associated with the given descriptor. This function // does not acquire the select_reactor's mutex. 
ASIO_DECL void cancel_ops_unlocked(socket_type descriptor, const asio::error_code& ec); // The scheduler implementation used to post completions. # if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_type; # else // defined(ASIO_HAS_IOCP) typedef class scheduler scheduler_type; # endif // defined(ASIO_HAS_IOCP) scheduler_type& scheduler_; // Mutex to protect access to internal data. asio::detail::mutex mutex_; // The interrupter is used to break a blocking select call. select_interrupter interrupter_; // The queues of read, write and except operations. reactor_op_queue op_queue_[max_ops]; // The file descriptor sets to be passed to the select system call. fd_set_adapter fd_sets_[max_select_ops]; // The timer queues. timer_queue_set timer_queues_; #if defined(ASIO_HAS_IOCP) // Helper class to run the reactor loop in a thread. class thread_function; friend class thread_function; // Does the reactor loop thread need to stop. bool stop_thread_; // The thread that is running the reactor loop. asio::detail::thread* thread_; #endif // defined(ASIO_HAS_IOCP) // Whether the service has been shut down. bool shutdown_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/select_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/select_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_IOCP) // || (!defined(ASIO_HAS_DEV_POLL) // && !defined(ASIO_HAS_EPOLL) // && !defined(ASIO_HAS_KQUEUE) // && !defined(ASIO_WINDOWS_RUNTIME)) #endif // ASIO_DETAIL_SELECT_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/reactive_socket_service_base.hpp000644 000164 177776 00000043417 15107057155 026252 0ustar00jenkinsnogroup000000 000000 // // detail/reactive_socket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_IOCP) \ && !defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/socket_base.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/reactive_null_buffers_op.hpp" #include "asio/detail/reactive_socket_recv_op.hpp" #include "asio/detail/reactive_socket_recvmsg_op.hpp" #include "asio/detail/reactive_socket_send_op.hpp" #include "asio/detail/reactive_wait_op.hpp" #include "asio/detail/reactor.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/socket_holder.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class reactive_socket_service_base { public: // The native type of a socket. typedef socket_type native_handle_type; // The implementation type of the socket. struct base_implementation_type { // The native socket representation. socket_type socket_; // The current state of the socket. socket_ops::state_type state_; // Per-descriptor data used by the reactor. reactor::per_descriptor_data reactor_data_; }; // Constructor. ASIO_DECL reactive_socket_service_base(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void base_shutdown(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type& impl); // Move-construct a new socket implementation. 
ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, reactive_socket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != invalid_socket; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Release ownership of the socket. ASIO_DECL socket_type release( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. ASIO_DECL asio::error_code cancel( base_implementation_type& impl, asio::error_code& ec); // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::sockatmark(impl.socket_, ec); } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type& impl, asio::error_code& ec) const { return socket_ops::available(impl.socket_, ec); } // Place the socket into the state where it will listen for new connections. asio::error_code listen(base_implementation_type& impl, int backlog, asio::error_code& ec) { socket_ops::listen(impl.socket_, backlog, ec); return ec; } // Perform an IO control command on the socket. 
template asio::error_code io_control(base_implementation_type& impl, IO_Control_Command& command, asio::error_code& ec) { socket_ops::ioctl(impl.socket_, impl.state_, command.name(), static_cast(command.data()), ec); return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::user_set_non_blocking) != 0; } // Sets the non-blocking mode of the socket. asio::error_code non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_user_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type& impl) const { return (impl.state_ & socket_ops::internal_non_blocking) != 0; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type& impl, bool mode, asio::error_code& ec) { socket_ops::set_internal_non_blocking(impl.socket_, impl.state_, mode, ec); return ec; } // Wait for the socket to become ready to read, ready to write, or to have // pending error conditions. asio::error_code wait(base_implementation_type& impl, socket_base::wait_type w, asio::error_code& ec) { switch (w) { case socket_base::wait_read: socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); break; case socket_base::wait_write: socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); break; case socket_base::wait_error: socket_ops::poll_error(impl.socket_, impl.state_, -1, ec); break; default: ec = asio::error::invalid_argument; break; } return ec; } // Asynchronously wait for the socket to become ready to read, ready to // write, or to have pending error conditions. 
template void async_wait(base_implementation_type& impl, socket_base::wait_type w, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_wait_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_wait")); int op_type; switch (w) { case socket_base::wait_read: op_type = reactor::read_op; break; case socket_base::wait_write: op_type = reactor::write_op; break; case socket_base::wait_error: op_type = reactor::except_op; break; default: p.p->ec_ = asio::error::invalid_argument; reactor_.post_immediate_completion(p.p, is_continuation); p.v = p.p = 0; return; } start_op(impl, op_type, p.p, is_continuation, false, false); p.v = p.p = 0; } // Send the given data to the peer. template size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_send(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be sent without blocking. size_t send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_write(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. 
typedef reactive_socket_send_op< ConstBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_send")); start_op(impl, reactor::write_op, p.p, is_continuation, true, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. template void async_send(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_send(null_buffers)")); start_op(impl, reactor::write_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data from the peer. Returns the number of bytes received. template size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recv(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), flags, bufs.all_empty(), ec); } // Wait until data can be received without blocking. size_t receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); return 0; } // Start an asynchronous receive. 
The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recv_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, impl.state_, buffers, flags, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, (flags & socket_base::message_out_of_band) == 0, ((impl.state_ & socket_ops::stream_oriented) && buffer_sequence_adapter::all_empty(buffers))); p.v = p.p = 0; } // Wait until data can be received without blocking. template void async_receive(base_implementation_type& impl, const null_buffers&, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive(null_buffers)")); start_op(impl, (flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } // Receive some data with associated flags. Returns the number of bytes // received. 
template size_t receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { buffer_sequence_adapter bufs(buffers); return socket_ops::sync_recvmsg(impl.socket_, impl.state_, bufs.buffers(), bufs.count(), in_flags, out_flags, ec); } // Wait until data can be received without blocking. size_t receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags, socket_base::message_flags& out_flags, asio::error_code& ec) { // Wait for socket to become ready. socket_ops::poll_read(impl.socket_, impl.state_, -1, ec); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive_with_flags(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_socket_recvmsg_op< MutableBufferSequence, Handler, IoExecutor> op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(impl.socket_, buffers, in_flags, out_flags, handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive_with_flags")); start_op(impl, (in_flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, (in_flags & socket_base::message_out_of_band) == 0, false); p.v = p.p = 0; } // Wait until data can be received without blocking. 
template void async_receive_with_flags(base_implementation_type& impl, const null_buffers&, socket_base::message_flags in_flags, socket_base::message_flags& out_flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef reactive_null_buffers_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(handler, io_ex); ASIO_HANDLER_CREATION((reactor_.context(), *p.p, "socket", &impl, impl.socket_, "async_receive_with_flags(null_buffers)")); // Clear out_flags, since we cannot give it any other sensible value when // performing a null_buffers operation. out_flags = 0; start_op(impl, (in_flags & socket_base::message_out_of_band) ? reactor::except_op : reactor::read_op, p.p, is_continuation, false, false); p.v = p.p = 0; } protected: // Open a new socket implementation. ASIO_DECL asio::error_code do_open( base_implementation_type& impl, int af, int type, int protocol, asio::error_code& ec); // Assign a native socket to a socket implementation. ASIO_DECL asio::error_code do_assign( base_implementation_type& impl, int type, const native_handle_type& native_socket, asio::error_code& ec); // Start the asynchronous read or write operation. ASIO_DECL void start_op(base_implementation_type& impl, int op_type, reactor_op* op, bool is_continuation, bool is_non_blocking, bool noop); // Start the asynchronous accept operation. ASIO_DECL void start_accept_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, bool peer_is_open); // Start the asynchronous connect operation. ASIO_DECL void start_connect_op(base_implementation_type& impl, reactor_op* op, bool is_continuation, const socket_addr_type* addr, size_t addrlen); // The selector that performs event demultiplexing for the service. 
reactor& reactor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/reactive_socket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // !defined(ASIO_HAS_IOCP) // && !defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_REACTIVE_SOCKET_SERVICE_BASE_HPP galera-4-26.4.25/asio/asio/detail/reactor.hpp000644 000164 177776 00000001627 15107057155 022022 0ustar00jenkinsnogroup000000 000000 // // detail/reactor.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_REACTOR_HPP #define ASIO_DETAIL_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/reactor_fwd.hpp" #if defined(ASIO_HAS_EPOLL) # include "asio/detail/epoll_reactor.hpp" #elif defined(ASIO_HAS_KQUEUE) # include "asio/detail/kqueue_reactor.hpp" #elif defined(ASIO_HAS_DEV_POLL) # include "asio/detail/dev_poll_reactor.hpp" #elif defined(ASIO_HAS_IOCP) || defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_reactor.hpp" #else # include "asio/detail/select_reactor.hpp" #endif #endif // ASIO_DETAIL_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/variadic_templates.hpp000644 000164 177776 00000010654 15107057155 024223 0ustar00jenkinsnogroup000000 000000 // // detail/variadic_templates.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #define ASIO_DETAIL_VARIADIC_TEMPLATES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # define ASIO_VARIADIC_TPARAMS(n) ASIO_VARIADIC_TPARAMS_##n # define ASIO_VARIADIC_TPARAMS_1 \ typename T1 # define ASIO_VARIADIC_TPARAMS_2 \ typename T1, typename T2 # define ASIO_VARIADIC_TPARAMS_3 \ typename T1, typename T2, typename T3 # define ASIO_VARIADIC_TPARAMS_4 \ typename T1, typename T2, typename T3, typename T4 # define ASIO_VARIADIC_TPARAMS_5 \ typename T1, typename T2, typename T3, typename T4, typename T5 # define ASIO_VARIADIC_TARGS(n) ASIO_VARIADIC_TARGS_##n # define ASIO_VARIADIC_TARGS_1 T1 # define ASIO_VARIADIC_TARGS_2 T1, T2 # define ASIO_VARIADIC_TARGS_3 T1, T2, T3 # define ASIO_VARIADIC_TARGS_4 T1, T2, T3, T4 # define ASIO_VARIADIC_TARGS_5 T1, T2, T3, T4, T5 # define ASIO_VARIADIC_BYVAL_PARAMS(n) \ ASIO_VARIADIC_BYVAL_PARAMS_##n # define ASIO_VARIADIC_BYVAL_PARAMS_1 T1 x1 # define ASIO_VARIADIC_BYVAL_PARAMS_2 T1 x1, T2 x2 # define ASIO_VARIADIC_BYVAL_PARAMS_3 T1 x1, T2 x2, T3 x3 # define ASIO_VARIADIC_BYVAL_PARAMS_4 T1 x1, T2 x2, T3 x3, T4 x4 # define ASIO_VARIADIC_BYVAL_PARAMS_5 T1 x1, T2 x2, T3 x3, T4 x4, T5 x5 # define ASIO_VARIADIC_BYVAL_ARGS(n) \ ASIO_VARIADIC_BYVAL_ARGS_##n # define ASIO_VARIADIC_BYVAL_ARGS_1 x1 # define ASIO_VARIADIC_BYVAL_ARGS_2 x1, x2 # define ASIO_VARIADIC_BYVAL_ARGS_3 x1, x2, x3 # define ASIO_VARIADIC_BYVAL_ARGS_4 x1, x2, x3, x4 # define ASIO_VARIADIC_BYVAL_ARGS_5 x1, x2, x3, x4, x5 # define ASIO_VARIADIC_CONSTREF_PARAMS(n) \ ASIO_VARIADIC_CONSTREF_PARAMS_##n # define ASIO_VARIADIC_CONSTREF_PARAMS_1 \ const T1& x1 # define ASIO_VARIADIC_CONSTREF_PARAMS_2 \ const T1& x1, const T2& x2 # define ASIO_VARIADIC_CONSTREF_PARAMS_3 \ const T1& x1, const T2& x2, 
const T3& x3 # define ASIO_VARIADIC_CONSTREF_PARAMS_4 \ const T1& x1, const T2& x2, const T3& x3, const T4& x4 # define ASIO_VARIADIC_CONSTREF_PARAMS_5 \ const T1& x1, const T2& x2, const T3& x3, const T4& x4, const T5& x5 # define ASIO_VARIADIC_MOVE_PARAMS(n) \ ASIO_VARIADIC_MOVE_PARAMS_##n # define ASIO_VARIADIC_MOVE_PARAMS_1 \ ASIO_MOVE_ARG(T1) x1 # define ASIO_VARIADIC_MOVE_PARAMS_2 \ ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2 # define ASIO_VARIADIC_MOVE_PARAMS_3 \ ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \ ASIO_MOVE_ARG(T3) x3 # define ASIO_VARIADIC_MOVE_PARAMS_4 \ ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \ ASIO_MOVE_ARG(T3) x3, ASIO_MOVE_ARG(T4) x4 # define ASIO_VARIADIC_MOVE_PARAMS_5 \ ASIO_MOVE_ARG(T1) x1, ASIO_MOVE_ARG(T2) x2, \ ASIO_MOVE_ARG(T3) x3, ASIO_MOVE_ARG(T4) x4, \ ASIO_MOVE_ARG(T5) x5 # define ASIO_VARIADIC_MOVE_ARGS(n) \ ASIO_VARIADIC_MOVE_ARGS_##n # define ASIO_VARIADIC_MOVE_ARGS_1 \ ASIO_MOVE_CAST(T1)(x1) # define ASIO_VARIADIC_MOVE_ARGS_2 \ ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2) # define ASIO_VARIADIC_MOVE_ARGS_3 \ ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \ ASIO_MOVE_CAST(T3)(x3) # define ASIO_VARIADIC_MOVE_ARGS_4 \ ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \ ASIO_MOVE_CAST(T3)(x3), ASIO_MOVE_CAST(T4)(x4) # define ASIO_VARIADIC_MOVE_ARGS_5 \ ASIO_MOVE_CAST(T1)(x1), ASIO_MOVE_CAST(T2)(x2), \ ASIO_MOVE_CAST(T3)(x3), ASIO_MOVE_CAST(T4)(x4), \ ASIO_MOVE_CAST(T5)(x5) # define ASIO_VARIADIC_DECAY(n) \ ASIO_VARIADIC_DECAY_##n # define ASIO_VARIADIC_DECAY_1 \ typename decay::type # define ASIO_VARIADIC_DECAY_2 \ typename decay::type, typename decay::type # define ASIO_VARIADIC_DECAY_3 \ typename decay::type, typename decay::type, \ typename decay::type # define ASIO_VARIADIC_DECAY_4 \ typename decay::type, typename decay::type, \ typename decay::type, typename decay::type # define ASIO_VARIADIC_DECAY_5 \ typename decay::type, typename decay::type, \ typename decay::type, typename decay::type, \ typename decay::type # define 
ASIO_VARIADIC_GENERATE(m) m(1) m(2) m(3) m(4) m(5) #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // ASIO_DETAIL_VARIADIC_TEMPLATES_HPP galera-4-26.4.25/asio/asio/detail/global.hpp000644 000164 177776 00000002356 15107057155 021623 0ustar00jenkinsnogroup000000 000000 // // detail/global.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_GLOBAL_HPP #define ASIO_DETAIL_GLOBAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) # include "asio/detail/null_global.hpp" #elif defined(ASIO_WINDOWS) # include "asio/detail/win_global.hpp" #elif defined(ASIO_HAS_PTHREADS) # include "asio/detail/posix_global.hpp" #elif defined(ASIO_HAS_STD_CALL_ONCE) # include "asio/detail/std_global.hpp" #else # error Only Windows, POSIX and std::call_once are supported! #endif namespace asio { namespace detail { template inline T& global() { #if !defined(ASIO_HAS_THREADS) return null_global(); #elif defined(ASIO_WINDOWS) return win_global(); #elif defined(ASIO_HAS_PTHREADS) return posix_global(); #elif defined(ASIO_HAS_STD_CALL_ONCE) return std_global(); #endif } } // namespace detail } // namespace asio #endif // ASIO_DETAIL_GLOBAL_HPP galera-4-26.4.25/asio/asio/detail/null_static_mutex.hpp000644 000164 177776 00000002201 15107057155 024113 0ustar00jenkinsnogroup000000 000000 // // detail/null_static_mutex.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #define ASIO_DETAIL_NULL_STATIC_MUTEX_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_HAS_THREADS) #include "asio/detail/scoped_lock.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct null_static_mutex { typedef asio::detail::scoped_lock scoped_lock; // Initialise the mutex. void init() { } // Lock the mutex. void lock() { } // Unlock the mutex. void unlock() { } int unused_; }; #define ASIO_NULL_STATIC_MUTEX_INIT { 0 } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_HAS_THREADS) #endif // ASIO_DETAIL_NULL_STATIC_MUTEX_HPP galera-4-26.4.25/asio/asio/detail/throw_error.hpp000644 000164 177776 00000002314 15107057155 022731 0ustar00jenkinsnogroup000000 000000 // // detail/throw_error.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_THROW_ERROR_HPP #define ASIO_DETAIL_THROW_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { ASIO_DECL void do_throw_error(const asio::error_code& err); ASIO_DECL void do_throw_error(const asio::error_code& err, const char* location); inline void throw_error(const asio::error_code& err) { if (err) do_throw_error(err); } inline void throw_error(const asio::error_code& err, const char* location) { if (err) do_throw_error(err, location); } } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/throw_error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_THROW_ERROR_HPP galera-4-26.4.25/asio/asio/detail/hash_map.hpp000644 000164 177776 00000017770 15107057155 022151 0ustar00jenkinsnogroup000000 000000 // // detail/hash_map.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_HASH_MAP_HPP #define ASIO_DETAIL_HASH_MAP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/assert.hpp" #include "asio/detail/noncopyable.hpp" #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) # include "asio/detail/socket_types.hpp" #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { inline std::size_t calculate_hash_value(int i) { return static_cast(i); } inline std::size_t calculate_hash_value(void* p) { return reinterpret_cast(p) + (reinterpret_cast(p) >> 3); } #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) inline std::size_t calculate_hash_value(SOCKET s) { return static_cast(s); } #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) // Note: assumes K and V are POD types. template class hash_map : private noncopyable { public: // The type of a value in the map. typedef std::pair value_type; // The type of a non-const iterator over the hash map. typedef typename std::list::iterator iterator; // The type of a const iterator over the hash map. typedef typename std::list::const_iterator const_iterator; // Constructor. hash_map() : size_(0), buckets_(0), num_buckets_(0) { } // Destructor. ~hash_map() { delete[] buckets_; } // Get an iterator for the beginning of the map. iterator begin() { return values_.begin(); } // Get an iterator for the beginning of the map. const_iterator begin() const { return values_.begin(); } // Get an iterator for the end of the map. iterator end() { return values_.end(); } // Get an iterator for the end of the map. const_iterator end() const { return values_.end(); } // Check whether the map is empty. bool empty() const { return values_.empty(); } // Find an entry in the map. 
iterator find(const K& k) { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) return values_.end(); iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Find an entry in the map. const_iterator find(const K& k) const { if (num_buckets_) { size_t bucket = calculate_hash_value(k) % num_buckets_; const_iterator it = buckets_[bucket].first; if (it == values_.end()) return it; const_iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == k) return it; ++it; } } return values_.end(); } // Insert a new entry into the map. std::pair insert(const value_type& v) { if (size_ + 1 >= num_buckets_) rehash(hash_size(size_ + 1)); size_t bucket = calculate_hash_value(v.first) % num_buckets_; iterator it = buckets_[bucket].first; if (it == values_.end()) { buckets_[bucket].first = buckets_[bucket].last = values_insert(values_.end(), v); ++size_; return std::pair(buckets_[bucket].last, true); } iterator end_it = buckets_[bucket].last; ++end_it; while (it != end_it) { if (it->first == v.first) return std::pair(it, false); ++it; } buckets_[bucket].last = values_insert(end_it, v); ++size_; return std::pair(buckets_[bucket].last, true); } // Erase an entry from the map. void erase(iterator it) { ASIO_ASSERT(it != values_.end()); ASIO_ASSERT(num_buckets_ != 0); size_t bucket = calculate_hash_value(it->first) % num_buckets_; bool is_first = (it == buckets_[bucket].first); bool is_last = (it == buckets_[bucket].last); if (is_first && is_last) buckets_[bucket].first = buckets_[bucket].last = values_.end(); else if (is_first) ++buckets_[bucket].first; else if (is_last) --buckets_[bucket].last; values_erase(it); --size_; } // Erase a key from the map. void erase(const K& k) { iterator it = find(k); if (it != values_.end()) erase(it); } // Remove all entries from the map. 
void clear() { // Clear the values. values_.clear(); size_ = 0; // Initialise all buckets to empty. iterator end_it = values_.end(); for (size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_it; } private: // Calculate the hash size for the specified number of elements. static std::size_t hash_size(std::size_t num_elems) { static std::size_t sizes[] = { #if defined(ASIO_HASH_MAP_BUCKETS) ASIO_HASH_MAP_BUCKETS #else // ASIO_HASH_MAP_BUCKETS 3, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, 49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469, 12582917, 25165843 #endif // ASIO_HASH_MAP_BUCKETS }; const std::size_t nth_size = sizeof(sizes) / sizeof(std::size_t) - 1; for (std::size_t i = 0; i < nth_size; ++i) if (num_elems < sizes[i]) return sizes[i]; return sizes[nth_size]; } // Re-initialise the hash from the values already contained in the list. void rehash(std::size_t num_buckets) { if (num_buckets == num_buckets_) return; ASIO_ASSERT(num_buckets != 0); iterator end_iter = values_.end(); // Update number of buckets and initialise all buckets to empty. bucket_type* tmp = new bucket_type[num_buckets]; delete[] buckets_; buckets_ = tmp; num_buckets_ = num_buckets; for (std::size_t i = 0; i < num_buckets_; ++i) buckets_[i].first = buckets_[i].last = end_iter; // Put all values back into the hash. iterator iter = values_.begin(); while (iter != end_iter) { std::size_t bucket = calculate_hash_value(iter->first) % num_buckets_; if (buckets_[bucket].last == end_iter) { buckets_[bucket].first = buckets_[bucket].last = iter++; } else if (++buckets_[bucket].last == iter) { ++iter; } else { values_.splice(buckets_[bucket].last, values_, iter++); --buckets_[bucket].last; } } } // Insert an element into the values list by splicing from the spares list, // if a spare is available, and otherwise by inserting a new element. 
iterator values_insert(iterator it, const value_type& v) { if (spares_.empty()) { return values_.insert(it, v); } else { spares_.front() = v; values_.splice(it, spares_, spares_.begin()); return --it; } } // Erase an element from the values list by splicing it to the spares list. void values_erase(iterator it) { *it = value_type(); spares_.splice(spares_.begin(), values_, it); } // The number of elements in the hash. std::size_t size_; // The list of all values in the hash map. std::list values_; // The list of spare nodes waiting to be recycled. Assumes that POD types only // are stored in the hash map. std::list spares_; // The type for a bucket in the hash table. struct bucket_type { iterator first; iterator last; }; // The buckets in the hash. bucket_type* buckets_; // The number of buckets in the hash. std::size_t num_buckets_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_HASH_MAP_HPP galera-4-26.4.25/asio/asio/detail/resolver_service_base.hpp000644 000164 177776 00000007753 15107057155 024744 0ustar00jenkinsnogroup000000 000000 // // detail/resolver_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #define ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/resolve_op.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/detail/thread.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class resolver_service_base { public: // The implementation type of the resolver. A cancellation token is used to // indicate to the background thread that the operation has been cancelled. typedef socket_ops::shared_cancel_token_type implementation_type; // Constructor. ASIO_DECL resolver_service_base(execution_context& context); // Destructor. ASIO_DECL ~resolver_service_base(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void base_shutdown(); // Perform any fork-related housekeeping. ASIO_DECL void base_notify_fork( execution_context::fork_event fork_ev); // Construct a new resolver implementation. ASIO_DECL void construct(implementation_type& impl); // Destroy a resolver implementation. ASIO_DECL void destroy(implementation_type&); // Move-construct a new resolver implementation. ASIO_DECL void move_construct(implementation_type& impl, implementation_type& other_impl); // Move-assign from another resolver implementation. 
ASIO_DECL void move_assign(implementation_type& impl, resolver_service_base& other_service, implementation_type& other_impl); // Cancel pending asynchronous operations. ASIO_DECL void cancel(implementation_type& impl); protected: // Helper function to start an asynchronous resolve operation. ASIO_DECL void start_resolve_op(resolve_op* op); #if !defined(ASIO_WINDOWS_RUNTIME) // Helper class to perform exception-safe cleanup of addrinfo objects. class auto_addrinfo : private asio::detail::noncopyable { public: explicit auto_addrinfo(asio::detail::addrinfo_type* ai) : ai_(ai) { } ~auto_addrinfo() { if (ai_) socket_ops::freeaddrinfo(ai_); } operator asio::detail::addrinfo_type*() { return ai_; } private: asio::detail::addrinfo_type* ai_; }; #endif // !defined(ASIO_WINDOWS_RUNTIME) // Helper class to run the work scheduler in a thread. class work_scheduler_runner; // Start the work scheduler if it's not already running. ASIO_DECL void start_work_thread(); // The scheduler implementation used to post completions. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; private: // Mutex to protect access to internal data. asio::detail::mutex mutex_; // Private scheduler used for performing asynchronous host resolution. asio::detail::scoped_ptr work_scheduler_; // Thread used for running the work io_context's run loop. asio::detail::scoped_ptr work_thread_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/resolver_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_RESOLVER_SERVICE_BASE_HPP galera-4-26.4.25/asio/asio/detail/socket_types.hpp000644 000164 177776 00000035054 15107057155 023100 0ustar00jenkinsnogroup000000 000000 // // detail/socket_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SOCKET_TYPES_HPP #define ASIO_DETAIL_SOCKET_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) // Empty. #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # if defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # error WinSock.h has already been included # endif // defined(_WINSOCKAPI_) && !defined(_WINSOCK2API_) # if defined(__BORLANDC__) # include // Needed for __errno # if !defined(_WSPIAPI_H_) # define _WSPIAPI_H_ # define ASIO_WSPIAPI_H_DEFINED # endif // !defined(_WSPIAPI_H_) # endif // defined(__BORLANDC__) # include # include # if defined(WINAPI_FAMILY) # if ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # include # endif // ((WINAPI_FAMILY & WINAPI_PARTITION_DESKTOP) != 0) # endif // defined(WINAPI_FAMILY) # if !defined(ASIO_WINDOWS_APP) # include # endif // !defined(ASIO_WINDOWS_APP) # if defined(ASIO_WSPIAPI_H_DEFINED) # undef _WSPIAPI_H_ # undef ASIO_WSPIAPI_H_DEFINED # endif // defined(ASIO_WSPIAPI_H_DEFINED) # if !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # if defined(UNDER_CE) # pragma comment(lib, "ws2.lib") # elif defined(_MSC_VER) || defined(__BORLANDC__) # pragma comment(lib, "ws2_32.lib") # if !defined(ASIO_WINDOWS_APP) # pragma comment(lib, "mswsock.lib") # endif // !defined(ASIO_WINDOWS_APP) # endif // defined(_MSC_VER) || defined(__BORLANDC__) # endif // !defined(ASIO_NO_DEFAULT_LINKED_LIBS) # include "asio/detail/old_win_sdk_compat.hpp" #else # include # if (defined(__MACH__) && defined(__APPLE__)) \ || defined(__FreeBSD__) || defined(__NetBSD__) \ || defined(__OpenBSD__) || defined(__linux__) \ || defined(__EMSCRIPTEN__) # include # elif !defined(__SYMBIAN32__) # include # endif # include # include 
# include # if defined(__hpux) # include # endif # if !defined(__hpux) || defined(__SELECT) # include # endif # include # include # include # include # if !defined(__SYMBIAN32__) # include # endif # include # include # include # include # if defined(__sun) # include # include # endif #endif #include "asio/detail/push_options.hpp" namespace asio { namespace detail { #if defined(ASIO_WINDOWS_RUNTIME) const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef unsigned __int32 u_long_type; typedef unsigned __int16 u_short_type; struct in4_addr_type { u_long_type s_addr; }; struct in4_mreq_type { in4_addr_type imr_multiaddr, imr_interface; }; struct in6_addr_type { unsigned char s6_addr[16]; }; struct in6_mreq_type { in6_addr_type ipv6mr_multiaddr; unsigned long ipv6mr_interface; }; struct socket_addr_type { int sa_family; }; struct sockaddr_in4_type { int sin_family; in4_addr_type sin_addr; u_short_type sin_port; }; struct sockaddr_in6_type { int sin6_family; in6_addr_type sin6_addr; u_short_type sin6_port; u_long_type sin6_flowinfo; u_long_type sin6_scope_id; }; struct sockaddr_storage_type { int ss_family; unsigned char ss_bytes[128 - sizeof(int)]; }; struct addrinfo_type { int ai_flags; int ai_family, ai_socktype, ai_protocol; int ai_addrlen; const void* ai_addr; const char* ai_canonname; addrinfo_type* ai_next; }; struct linger_type { u_short_type l_onoff, l_linger; }; typedef u_long_type ioctl_arg_type; typedef int signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC 0 # define ASIO_OS_DEF_AF_INET 2 # define ASIO_OS_DEF_AF_INET6 23 # define ASIO_OS_DEF_SOCK_STREAM 1 # define ASIO_OS_DEF_SOCK_DGRAM 2 # define ASIO_OS_DEF_SOCK_RAW 3 # define ASIO_OS_DEF_SOCK_SEQPACKET 5 # define ASIO_OS_DEF_IPPROTO_IP 0 # define ASIO_OS_DEF_IPPROTO_IPV6 41 # define ASIO_OS_DEF_IPPROTO_TCP 6 # define ASIO_OS_DEF_IPPROTO_UDP 17 # define ASIO_OS_DEF_IPPROTO_ICMP 1 # define ASIO_OS_DEF_IPPROTO_ICMPV6 58 # define 
ASIO_OS_DEF_FIONBIO 1 # define ASIO_OS_DEF_FIONREAD 2 # define ASIO_OS_DEF_INADDR_ANY 0 # define ASIO_OS_DEF_MSG_OOB 0x1 # define ASIO_OS_DEF_MSG_PEEK 0x2 # define ASIO_OS_DEF_MSG_DONTROUTE 0x4 # define ASIO_OS_DEF_MSG_EOR 0 // Not supported. # define ASIO_OS_DEF_SHUT_RD 0x0 # define ASIO_OS_DEF_SHUT_WR 0x1 # define ASIO_OS_DEF_SHUT_RDWR 0x2 # define ASIO_OS_DEF_SOMAXCONN 0x7fffffff # define ASIO_OS_DEF_SOL_SOCKET 0xffff # define ASIO_OS_DEF_SO_BROADCAST 0x20 # define ASIO_OS_DEF_SO_DEBUG 0x1 # define ASIO_OS_DEF_SO_DONTROUTE 0x10 # define ASIO_OS_DEF_SO_KEEPALIVE 0x8 # define ASIO_OS_DEF_SO_LINGER 0x80 # define ASIO_OS_DEF_SO_OOBINLINE 0x100 # define ASIO_OS_DEF_SO_SNDBUF 0x1001 # define ASIO_OS_DEF_SO_RCVBUF 0x1002 # define ASIO_OS_DEF_SO_SNDLOWAT 0x1003 # define ASIO_OS_DEF_SO_RCVLOWAT 0x1004 # define ASIO_OS_DEF_SO_REUSEADDR 0x4 # define ASIO_OS_DEF_TCP_NODELAY 0x1 # define ASIO_OS_DEF_IP_MULTICAST_IF 2 # define ASIO_OS_DEF_IP_MULTICAST_TTL 3 # define ASIO_OS_DEF_IP_MULTICAST_LOOP 4 # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP 5 # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP 6 # define ASIO_OS_DEF_IP_TTL 7 # define ASIO_OS_DEF_IPV6_UNICAST_HOPS 4 # define ASIO_OS_DEF_IPV6_MULTICAST_IF 9 # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS 10 # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP 11 # define ASIO_OS_DEF_IPV6_JOIN_GROUP 12 # define ASIO_OS_DEF_IPV6_LEAVE_GROUP 13 # define ASIO_OS_DEF_AI_CANONNAME 0x2 # define ASIO_OS_DEF_AI_PASSIVE 0x1 # define ASIO_OS_DEF_AI_NUMERICHOST 0x4 # define ASIO_OS_DEF_AI_NUMERICSERV 0x8 # define ASIO_OS_DEF_AI_V4MAPPED 0x800 # define ASIO_OS_DEF_AI_ALL 0x100 # define ASIO_OS_DEF_AI_ADDRCONFIG 0x400 #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) typedef SOCKET socket_type; const SOCKET invalid_socket = INVALID_SOCKET; const int socket_error_retval = SOCKET_ERROR; const int max_addr_v4_str_len = 256; const int max_addr_v6_str_len = 256; typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; typedef ip_mreq in4_mreq_type; typedef sockaddr_in 
sockaddr_in4_type; # if defined(ASIO_HAS_OLD_WIN_SDK) typedef in6_addr_emulation in6_addr_type; typedef ipv6_mreq_emulation in6_mreq_type; typedef sockaddr_in6_emulation sockaddr_in6_type; typedef sockaddr_storage_emulation sockaddr_storage_type; typedef addrinfo_emulation addrinfo_type; # else typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef addrinfo addrinfo_type; # endif typedef ::linger linger_type; typedef unsigned long ioctl_arg_type; typedef u_long u_long_type; typedef u_short u_short_type; typedef int signed_size_type; # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR 0 // Not supported on Windows. 
# define ASIO_OS_DEF_SHUT_RD SD_RECEIVE # define ASIO_OS_DEF_SHUT_WR SD_SEND # define ASIO_OS_DEF_SHUT_RDWR SD_BOTH # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_OOBINLINE SO_OOBINLINE # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif # if defined(AI_V4MAPPED) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define 
ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined (_WIN32_WINNT) const int max_iov_len = 64; # else const int max_iov_len = 16; # endif #else typedef int socket_type; const int invalid_socket = -1; const int socket_error_retval = -1; const int max_addr_v4_str_len = INET_ADDRSTRLEN; #if defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = INET6_ADDRSTRLEN + 1 + IF_NAMESIZE; #else // defined(INET6_ADDRSTRLEN) const int max_addr_v6_str_len = 256; #endif // defined(INET6_ADDRSTRLEN) typedef sockaddr socket_addr_type; typedef in_addr in4_addr_type; # if defined(__hpux) // HP-UX doesn't provide ip_mreq when _XOPEN_SOURCE_EXTENDED is defined. struct in4_mreq_type { struct in_addr imr_multiaddr; struct in_addr imr_interface; }; # else typedef ip_mreq in4_mreq_type; # endif typedef sockaddr_in sockaddr_in4_type; typedef in6_addr in6_addr_type; typedef ipv6_mreq in6_mreq_type; typedef sockaddr_in6 sockaddr_in6_type; typedef sockaddr_storage sockaddr_storage_type; typedef sockaddr_un sockaddr_un_type; typedef addrinfo addrinfo_type; typedef ::linger linger_type; typedef int ioctl_arg_type; typedef uint32_t u_long_type; typedef uint16_t u_short_type; #if defined(ASIO_HAS_SSIZE_T) typedef ssize_t signed_size_type; #else // defined(ASIO_HAS_SSIZE_T) typedef int signed_size_type; #endif // defined(ASIO_HAS_SSIZE_T) # define ASIO_OS_DEF(c) ASIO_OS_DEF_##c # define ASIO_OS_DEF_AF_UNSPEC AF_UNSPEC # define ASIO_OS_DEF_AF_INET AF_INET # define ASIO_OS_DEF_AF_INET6 AF_INET6 # define ASIO_OS_DEF_SOCK_STREAM SOCK_STREAM # define ASIO_OS_DEF_SOCK_DGRAM SOCK_DGRAM # define ASIO_OS_DEF_SOCK_RAW SOCK_RAW # define ASIO_OS_DEF_SOCK_SEQPACKET SOCK_SEQPACKET # define ASIO_OS_DEF_IPPROTO_IP IPPROTO_IP # define ASIO_OS_DEF_IPPROTO_IPV6 IPPROTO_IPV6 # define ASIO_OS_DEF_IPPROTO_TCP IPPROTO_TCP # define ASIO_OS_DEF_IPPROTO_UDP IPPROTO_UDP # define ASIO_OS_DEF_IPPROTO_ICMP IPPROTO_ICMP # define ASIO_OS_DEF_IPPROTO_ICMPV6 IPPROTO_ICMPV6 # define ASIO_OS_DEF_FIONBIO FIONBIO # define 
ASIO_OS_DEF_FIONREAD FIONREAD # define ASIO_OS_DEF_INADDR_ANY INADDR_ANY # define ASIO_OS_DEF_MSG_OOB MSG_OOB # define ASIO_OS_DEF_MSG_PEEK MSG_PEEK # define ASIO_OS_DEF_MSG_DONTROUTE MSG_DONTROUTE # define ASIO_OS_DEF_MSG_EOR MSG_EOR # define ASIO_OS_DEF_SHUT_RD SHUT_RD # define ASIO_OS_DEF_SHUT_WR SHUT_WR # define ASIO_OS_DEF_SHUT_RDWR SHUT_RDWR # define ASIO_OS_DEF_SOMAXCONN SOMAXCONN # define ASIO_OS_DEF_SOL_SOCKET SOL_SOCKET # define ASIO_OS_DEF_SO_BROADCAST SO_BROADCAST # define ASIO_OS_DEF_SO_DEBUG SO_DEBUG # define ASIO_OS_DEF_SO_DONTROUTE SO_DONTROUTE # define ASIO_OS_DEF_SO_KEEPALIVE SO_KEEPALIVE # define ASIO_OS_DEF_SO_LINGER SO_LINGER # define ASIO_OS_DEF_SO_OOBINLINE SO_OOBINLINE # define ASIO_OS_DEF_SO_SNDBUF SO_SNDBUF # define ASIO_OS_DEF_SO_RCVBUF SO_RCVBUF # define ASIO_OS_DEF_SO_SNDLOWAT SO_SNDLOWAT # define ASIO_OS_DEF_SO_RCVLOWAT SO_RCVLOWAT # define ASIO_OS_DEF_SO_REUSEADDR SO_REUSEADDR # define ASIO_OS_DEF_TCP_NODELAY TCP_NODELAY # define ASIO_OS_DEF_IP_MULTICAST_IF IP_MULTICAST_IF # define ASIO_OS_DEF_IP_MULTICAST_TTL IP_MULTICAST_TTL # define ASIO_OS_DEF_IP_MULTICAST_LOOP IP_MULTICAST_LOOP # define ASIO_OS_DEF_IP_ADD_MEMBERSHIP IP_ADD_MEMBERSHIP # define ASIO_OS_DEF_IP_DROP_MEMBERSHIP IP_DROP_MEMBERSHIP # define ASIO_OS_DEF_IP_TTL IP_TTL # define ASIO_OS_DEF_IPV6_UNICAST_HOPS IPV6_UNICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_IF IPV6_MULTICAST_IF # define ASIO_OS_DEF_IPV6_MULTICAST_HOPS IPV6_MULTICAST_HOPS # define ASIO_OS_DEF_IPV6_MULTICAST_LOOP IPV6_MULTICAST_LOOP # define ASIO_OS_DEF_IPV6_JOIN_GROUP IPV6_JOIN_GROUP # define ASIO_OS_DEF_IPV6_LEAVE_GROUP IPV6_LEAVE_GROUP # define ASIO_OS_DEF_AI_CANONNAME AI_CANONNAME # define ASIO_OS_DEF_AI_PASSIVE AI_PASSIVE # define ASIO_OS_DEF_AI_NUMERICHOST AI_NUMERICHOST # if defined(AI_NUMERICSERV) # define ASIO_OS_DEF_AI_NUMERICSERV AI_NUMERICSERV # else # define ASIO_OS_DEF_AI_NUMERICSERV 0 # endif // Note: QNX Neutrino 6.3 defines AI_V4MAPPED, AI_ALL and AI_ADDRCONFIG but // does not implement 
them. Therefore they are specifically excluded here. # if defined(AI_V4MAPPED) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_V4MAPPED AI_V4MAPPED # else # define ASIO_OS_DEF_AI_V4MAPPED 0 # endif # if defined(AI_ALL) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ALL AI_ALL # else # define ASIO_OS_DEF_AI_ALL 0 # endif # if defined(AI_ADDRCONFIG) && !defined(__QNXNTO__) # define ASIO_OS_DEF_AI_ADDRCONFIG AI_ADDRCONFIG # else # define ASIO_OS_DEF_AI_ADDRCONFIG 0 # endif # if defined(IOV_MAX) const int max_iov_len = IOV_MAX; # else // POSIX platforms are not required to define IOV_MAX. const int max_iov_len = 16; # endif #endif const int custom_socket_option_level = 0xA5100000; const int enable_connection_aborted_option = 1; const int always_fail_option = 2; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SOCKET_TYPES_HPP galera-4-26.4.25/asio/asio/detail/buffered_stream_storage.hpp000644 000164 177776 00000005472 15107057155 025246 0ustar00jenkinsnogroup000000 000000 // // detail/buffered_stream_storage.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #define ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/assert.hpp" #include #include #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class buffered_stream_storage { public: // The type of the bytes stored in the buffer. typedef unsigned char byte_type; // The type used for offsets into the buffer. typedef std::size_t size_type; // Constructor. 
explicit buffered_stream_storage(std::size_t buffer_capacity) : begin_offset_(0), end_offset_(0), buffer_(buffer_capacity) { } /// Clear the buffer. void clear() { begin_offset_ = 0; end_offset_ = 0; } // Return a pointer to the beginning of the unread data. mutable_buffer data() { return asio::buffer(buffer_) + begin_offset_; } // Return a pointer to the beginning of the unread data. const_buffer data() const { return asio::buffer(buffer_) + begin_offset_; } // Is there no unread data in the buffer. bool empty() const { return begin_offset_ == end_offset_; } // Return the amount of unread data the is in the buffer. size_type size() const { return end_offset_ - begin_offset_; } // Resize the buffer to the specified length. void resize(size_type length) { ASIO_ASSERT(length <= capacity()); if (begin_offset_ + length <= capacity()) { end_offset_ = begin_offset_ + length; } else { using namespace std; // For memmove. memmove(&buffer_[0], &buffer_[0] + begin_offset_, size()); end_offset_ = length; begin_offset_ = 0; } } // Return the maximum size for data in the buffer. size_type capacity() const { return buffer_.size(); } // Consume multiple bytes from the beginning of the buffer. void consume(size_type count) { ASIO_ASSERT(begin_offset_ + count <= end_offset_); begin_offset_ += count; if (empty()) clear(); } private: // The offset to the beginning of the unread data. size_type begin_offset_; // The offset to the end of the unread data. size_type end_offset_; // The data in the buffer. std::vector buffer_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP galera-4-26.4.25/asio/asio/detail/winrt_ssocket_service_base.hpp000644 000164 177776 00000027527 15107057155 026002 0ustar00jenkinsnogroup000000 000000 // // detail/winrt_ssocket_service_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #define ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS_RUNTIME) #include "asio/buffer.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/socket_base.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/winrt_async_manager.hpp" #include "asio/detail/winrt_socket_recv_op.hpp" #include "asio/detail/winrt_socket_send_op.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_io_context.hpp" #else // defined(ASIO_HAS_IOCP) # include "asio/detail/scheduler.hpp" #endif // defined(ASIO_HAS_IOCP) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class winrt_ssocket_service_base { public: // The native type of a socket. typedef Windows::Networking::Sockets::StreamSocket^ native_handle_type; // The implementation type of the socket. struct base_implementation_type { // Default constructor. base_implementation_type() : socket_(nullptr), next_(0), prev_(0) { } // The underlying native socket. native_handle_type socket_; // Pointers to adjacent socket implementations in linked list. base_implementation_type* next_; base_implementation_type* prev_; }; // Constructor. ASIO_DECL winrt_ssocket_service_base(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void base_shutdown(); // Construct a new socket implementation. ASIO_DECL void construct(base_implementation_type&); // Move-construct a new socket implementation. 
ASIO_DECL void base_move_construct(base_implementation_type& impl, base_implementation_type& other_impl); // Move-assign from another socket implementation. ASIO_DECL void base_move_assign(base_implementation_type& impl, winrt_ssocket_service_base& other_service, base_implementation_type& other_impl); // Destroy a socket implementation. ASIO_DECL void destroy(base_implementation_type& impl); // Determine whether the socket is open. bool is_open(const base_implementation_type& impl) const { return impl.socket_ != nullptr; } // Destroy a socket implementation. ASIO_DECL asio::error_code close( base_implementation_type& impl, asio::error_code& ec); // Release ownership of the socket. ASIO_DECL native_handle_type release( base_implementation_type& impl, asio::error_code& ec); // Get the native socket representation. native_handle_type native_handle(base_implementation_type& impl) { return impl.socket_; } // Cancel all operations associated with the socket. asio::error_code cancel(base_implementation_type&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Determine whether the socket is at the out-of-band data mark. bool at_mark(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return false; } // Determine the number of bytes available for reading. std::size_t available(const base_implementation_type&, asio::error_code& ec) const { ec = asio::error::operation_not_supported; return 0; } // Perform an IO control command on the socket. template asio::error_code io_control(base_implementation_type&, IO_Control_Command&, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the socket. bool non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the socket. 
asio::error_code non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Gets the non-blocking mode of the native socket implementation. bool native_non_blocking(const base_implementation_type&) const { return false; } // Sets the non-blocking mode of the native socket implementation. asio::error_code native_non_blocking(base_implementation_type&, bool, asio::error_code& ec) { ec = asio::error::operation_not_supported; return ec; } // Send the given data to the peer. template std::size_t send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_send(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be sent without blocking. std::size_t send(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous send. The data being sent must be valid for the // lifetime of the asynchronous operation. template void async_send(base_implementation_type& impl, const ConstBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_send_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "socket", &impl, 0, "async_send")); start_send_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Start an asynchronous wait until data can be sent without blocking. 
template void async_send(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler(handler, ec, bytes_transferred)); } // Receive some data from the peer. Returns the number of bytes received. template std::size_t receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, asio::error_code& ec) { return do_receive(impl, buffer_sequence_adapter::first(buffers), flags, ec); } // Wait until data can be received without blocking. std::size_t receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, asio::error_code& ec) { ec = asio::error::operation_not_supported; return 0; } // Start an asynchronous receive. The buffer for the data being received // must be valid for the lifetime of the asynchronous operation. template void async_receive(base_implementation_type& impl, const MutableBufferSequence& buffers, socket_base::message_flags flags, Handler& handler, const IoExecutor& io_ex) { bool is_continuation = asio_handler_cont_helpers::is_continuation(handler); // Allocate and construct an operation to wrap the handler. typedef winrt_socket_recv_op op; typename op::ptr p = { asio::detail::addressof(handler), op::ptr::allocate(handler), 0 }; p.p = new (p.v) op(buffers, handler, io_ex); ASIO_HANDLER_CREATION((scheduler_.context(), *p.p, "socket", &impl, 0, "async_receive")); start_receive_op(impl, buffer_sequence_adapter::first(buffers), flags, p.p, is_continuation); p.v = p.p = 0; } // Wait until data can be received without blocking. 
template void async_receive(base_implementation_type&, const null_buffers&, socket_base::message_flags, Handler& handler, const IoExecutor& io_ex) { asio::error_code ec = asio::error::operation_not_supported; const std::size_t bytes_transferred = 0; asio::post(io_ex, detail::bind_handler(handler, ec, bytes_transferred)); } protected: // Helper function to obtain endpoints associated with the connection. ASIO_DECL std::size_t do_get_endpoint( const base_implementation_type& impl, bool local, void* addr, std::size_t addr_len, asio::error_code& ec) const; // Helper function to set a socket option. ASIO_DECL asio::error_code do_set_option( base_implementation_type& impl, int level, int optname, const void* optval, std::size_t optlen, asio::error_code& ec); // Helper function to get a socket option. ASIO_DECL void do_get_option( const base_implementation_type& impl, int level, int optname, void* optval, std::size_t* optlen, asio::error_code& ec) const; // Helper function to perform a synchronous connect. ASIO_DECL asio::error_code do_connect( base_implementation_type& impl, const void* addr, asio::error_code& ec); // Helper function to start an asynchronous connect. ASIO_DECL void start_connect_op( base_implementation_type& impl, const void* addr, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous send. ASIO_DECL std::size_t do_send( base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous send. ASIO_DECL void start_send_op(base_implementation_type& impl, const asio::const_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // Helper function to perform a synchronous receive. ASIO_DECL std::size_t do_receive( base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, asio::error_code& ec); // Helper function to start an asynchronous receive. 
ASIO_DECL void start_receive_op(base_implementation_type& impl, const asio::mutable_buffer& data, socket_base::message_flags flags, winrt_async_op* op, bool is_continuation); // The scheduler implementation used for delivering completions. #if defined(ASIO_HAS_IOCP) typedef class win_iocp_io_context scheduler_impl; #else typedef class scheduler scheduler_impl; #endif scheduler_impl& scheduler_; // The manager that keeps track of outstanding operations. winrt_async_manager& async_manager_; // Mutex to protect access to the linked list of implementations. asio::detail::mutex mutex_; // The head of a linked list of all implementations. base_implementation_type* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/winrt_ssocket_service_base.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_WINDOWS_RUNTIME) #endif // ASIO_DETAIL_WINRT_SSOCKET_SERVICE_BASE_HPP galera-4-26.4.25/asio/asio/detail/signal_op.hpp000644 000164 177776 00000002040 15107057155 022324 0ustar00jenkinsnogroup000000 000000 // // detail/signal_op.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_OP_HPP #define ASIO_DETAIL_SIGNAL_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class signal_op : public operation { public: // The error code to be passed to the completion handler. asio::error_code ec_; // The signal number to be passed to the completion handler. 
int signal_number_; protected: signal_op(func_type func) : operation(func), signal_number_(0) { } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_DETAIL_SIGNAL_OP_HPP galera-4-26.4.25/asio/asio/detail/timer_queue_ptime.hpp000644 000164 177776 00000005613 15107057155 024104 0ustar00jenkinsnogroup000000 000000 // // detail/timer_queue_ptime.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #define ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/time_traits.hpp" #include "asio/detail/timer_queue.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { struct forwarding_posix_time_traits : time_traits {}; // Template specialisation for the commonly used instantation. template <> class timer_queue > : public timer_queue_base { public: // The time type. typedef boost::posix_time::ptime time_type; // The duration type. typedef boost::posix_time::time_duration duration_type; // Per-timer data. typedef timer_queue::per_timer_data per_timer_data; // Constructor. ASIO_DECL timer_queue(); // Destructor. ASIO_DECL virtual ~timer_queue(); // Add a new timer to the queue. Returns true if this is the timer that is // earliest in the queue, in which case the reactor's event demultiplexing // function call may need to be interrupted and restarted. ASIO_DECL bool enqueue_timer(const time_type& time, per_timer_data& timer, wait_op* op); // Whether there are no timers in the queue. 
ASIO_DECL virtual bool empty() const; // Get the time for the timer that is earliest in the queue. ASIO_DECL virtual long wait_duration_msec(long max_duration) const; // Get the time for the timer that is earliest in the queue. ASIO_DECL virtual long wait_duration_usec(long max_duration) const; // Dequeue all timers not later than the current time. ASIO_DECL virtual void get_ready_timers(op_queue& ops); // Dequeue all timers. ASIO_DECL virtual void get_all_timers(op_queue& ops); // Cancel and dequeue operations for the given timer. ASIO_DECL std::size_t cancel_timer( per_timer_data& timer, op_queue& ops, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move operations from one timer to another, empty timer. ASIO_DECL void move_timer(per_timer_data& target, per_timer_data& source); private: timer_queue impl_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/timer_queue_ptime.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #endif // ASIO_DETAIL_TIMER_QUEUE_PTIME_HPP galera-4-26.4.25/asio/asio/detail/signal_init.hpp000644 000164 177776 00000001727 15107057155 022664 0ustar00jenkinsnogroup000000 000000 // // detail/signal_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_SIGNAL_INIT_HPP #define ASIO_DETAIL_SIGNAL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #include #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class signal_init { public: // Constructor. 
signal_init() { std::signal(Signal, SIG_IGN); } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) #endif // ASIO_DETAIL_SIGNAL_INIT_HPP galera-4-26.4.25/asio/asio/detail/win_fenced_block.hpp000644 000164 177776 00000003737 15107057155 023642 0ustar00jenkinsnogroup000000 000000 // // detail/win_fenced_block.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #define ASIO_DETAIL_WIN_FENCED_BLOCK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_WINDOWS) && !defined(UNDER_CE) #include "asio/detail/socket_types.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class win_fenced_block : private noncopyable { public: enum half_t { half }; enum full_t { full }; // Constructor for a half fenced block. explicit win_fenced_block(half_t) { } // Constructor for a full fenced block. explicit win_fenced_block(full_t) { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } // Destructor. 
~win_fenced_block() { #if defined(__BORLANDC__) LONG barrier = 0; ::InterlockedExchange(&barrier, 1); #elif defined(ASIO_MSVC) \ && ((ASIO_MSVC < 1400) || !defined(MemoryBarrier)) # if defined(_M_IX86) # pragma warning(push) # pragma warning(disable:4793) LONG barrier; __asm { xchg barrier, eax } # pragma warning(pop) # endif // defined(_M_IX86) #else MemoryBarrier(); #endif } }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_WINDOWS) && !defined(UNDER_CE) #endif // ASIO_DETAIL_WIN_FENCED_BLOCK_HPP galera-4-26.4.25/asio/asio/detail/win_iocp_overlapped_op.hpp000644 000164 177776 00000005502 15107057155 025105 0ustar00jenkinsnogroup000000 000000 // // detail/win_iocp_overlapped_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #define ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_IOCP) #include "asio/error.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/fenced_block.hpp" #include "asio/detail/handler_alloc_helpers.hpp" #include "asio/detail/handler_invoke_helpers.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/operation.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template class win_iocp_overlapped_op : public operation { public: ASIO_DEFINE_HANDLER_PTR(win_iocp_overlapped_op); win_iocp_overlapped_op(Handler& handler, const IoExecutor& io_ex) : operation(&win_iocp_overlapped_op::do_complete), handler_(ASIO_MOVE_CAST(Handler)(handler)), io_executor_(io_ex) { handler_work::start(handler_, io_executor_); } static void 
do_complete(void* owner, operation* base, const asio::error_code& ec, std::size_t bytes_transferred) { // Take ownership of the operation object. win_iocp_overlapped_op* o(static_cast(base)); ptr p = { asio::detail::addressof(o->handler_), o, o }; handler_work w(o->handler_, o->io_executor_); ASIO_HANDLER_COMPLETION((*o)); // Make a copy of the handler so that the memory can be deallocated before // the upcall is made. Even if we're not about to make an upcall, a // sub-object of the handler may be the true owner of the memory associated // with the handler. Consequently, a local copy of the handler is required // to ensure that any owning sub-object remains valid until after we have // deallocated the memory here. detail::binder2 handler(o->handler_, ec, bytes_transferred); p.h = asio::detail::addressof(handler.handler_); p.reset(); // Make the upcall if required. if (owner) { fenced_block b(fenced_block::half); ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_)); w.complete(handler, handler.handler_); ASIO_HANDLER_INVOCATION_END; } } private: Handler handler_; IoExecutor io_executor_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_IOCP) #endif // ASIO_DETAIL_WIN_IOCP_OVERLAPPED_OP_HPP galera-4-26.4.25/asio/asio/detail/strand_executor_service.hpp000644 000164 177776 00000011316 15107057155 025310 0ustar00jenkinsnogroup000000 000000 // // detail/strand_executor_service.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP #define ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/atomic_count.hpp" #include "asio/detail/executor_op.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/scheduler_operation.hpp" #include "asio/detail/scoped_ptr.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // Default service implementation for a strand. class strand_executor_service : public execution_context_service_base { public: // The underlying implementation of a strand. class strand_impl { public: ASIO_DECL ~strand_impl(); private: friend class strand_executor_service; // Mutex to protect access to internal data. mutex* mutex_; // Indicates whether the strand is currently "locked" by a handler. This // means that there is a handler upcall in progress, or that the strand // itself has been scheduled in order to invoke some pending handlers. bool locked_; // Indicates that the strand has been shut down and will accept no further // handlers. bool shutdown_; // The handlers that are waiting on the strand but should not be run until // after the next time the strand is scheduled. This queue must only be // modified while the mutex is locked. op_queue waiting_queue_; // The handlers that are ready to be run. Logically speaking, these are the // handlers that hold the strand's lock. The ready queue is only modified // from within the strand and so may be accessed without locking the mutex. op_queue ready_queue_; // Pointers to adjacent handle implementations in linked list. 
strand_impl* next_; strand_impl* prev_; // The strand service in where the implementation is held. strand_executor_service* service_; }; typedef shared_ptr implementation_type; // Construct a new strand service for the specified context. ASIO_DECL explicit strand_executor_service(execution_context& context); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Create a new strand_executor implementation. ASIO_DECL implementation_type create_implementation(); // Request invocation of the given function. template static void dispatch(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a); // Request invocation of the given function and return immediately. template static void post(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a); // Request invocation of the given function and return immediately. template static void defer(const implementation_type& impl, Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a); // Determine whether the strand is running in the current thread. ASIO_DECL static bool running_in_this_thread( const implementation_type& impl); private: friend class strand_impl; template class invoker; // Adds a function to the strand. Returns true if it acquires the lock. ASIO_DECL static bool enqueue(const implementation_type& impl, scheduler_operation* op); // Mutex to protect access to the service-wide state. mutex mutex_; // Number of mutexes shared between all strand objects. enum { num_mutexes = 193 }; // Pool of mutexes. scoped_ptr mutexes_[num_mutexes]; // Extra value used when hashing to prevent recycled memory locations from // getting the same mutex. std::size_t salt_; // The head of a linked list of all implementations. 
strand_impl* impl_list_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/strand_executor_service.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/strand_executor_service.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_DETAIL_STRAND_EXECUTOR_SERVICE_HPP galera-4-26.4.25/asio/asio/detail/posix_thread.hpp000644 000164 177776 00000003762 15107057155 023056 0ustar00jenkinsnogroup000000 000000 // // detail/posix_thread.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_POSIX_THREAD_HPP #define ASIO_DETAIL_POSIX_THREAD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_PTHREADS) #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { extern "C" { ASIO_DECL void* asio_detail_posix_thread_function(void* arg); } class posix_thread : private noncopyable { public: // Constructor. template posix_thread(Function f, unsigned int = 0) : joined_(false) { start_thread(new func(f)); } // Destructor. ASIO_DECL ~posix_thread(); // Wait for the thread to exit. ASIO_DECL void join(); // Get number of CPUs. 
ASIO_DECL static std::size_t hardware_concurrency(); private: friend void* asio_detail_posix_thread_function(void* arg); class func_base { public: virtual ~func_base() {} virtual void run() = 0; }; struct auto_func_base_ptr { func_base* ptr; ~auto_func_base_ptr() { delete ptr; } }; template class func : public func_base { public: func(Function f) : f_(f) { } virtual void run() { f_(); } private: Function f_; }; ASIO_DECL void start_thread(func_base* arg); ::pthread_t thread_; bool joined_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/posix_thread.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_PTHREADS) #endif // ASIO_DETAIL_POSIX_THREAD_HPP galera-4-26.4.25/asio/asio/detail/functional.hpp000644 000164 177776 00000001631 15107057155 022520 0ustar00jenkinsnogroup000000 000000 // // detail/functional.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_FUNCTIONAL_HPP #define ASIO_DETAIL_FUNCTIONAL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #if !defined(ASIO_HAS_STD_FUNCTION) # include #endif // !defined(ASIO_HAS_STD_FUNCTION) namespace asio { namespace detail { #if defined(ASIO_HAS_STD_FUNCTION) using std::function; #else // defined(ASIO_HAS_STD_FUNCTION) using boost::function; #endif // defined(ASIO_HAS_STD_FUNCTION) } // namespace detail } // namespace asio #endif // ASIO_DETAIL_FUNCTIONAL_HPP galera-4-26.4.25/asio/asio/detail/kqueue_reactor.hpp000644 000164 177776 00000017422 15107057155 023401 0ustar00jenkinsnogroup000000 000000 // // detail/kqueue_reactor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2005 Stefan Arentz (stefan at soze dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_KQUEUE_REACTOR_HPP #define ASIO_DETAIL_KQUEUE_REACTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_KQUEUE) #include #include #include #include #include "asio/detail/limits.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/object_pool.hpp" #include "asio/detail/op_queue.hpp" #include "asio/detail/reactor_op.hpp" #include "asio/detail/select_interrupter.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/timer_queue_base.hpp" #include "asio/detail/timer_queue_set.hpp" #include "asio/detail/wait_op.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" // Older versions of Mac OS X may not define EV_OOBAND. 
#if !defined(EV_OOBAND) # define EV_OOBAND EV_FLAG1 #endif // !defined(EV_OOBAND) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { class scheduler; class kqueue_reactor : public execution_context_service_base { private: // The mutex type used by this reactor. typedef conditionally_enabled_mutex mutex; public: enum op_types { read_op = 0, write_op = 1, connect_op = 1, except_op = 2, max_ops = 3 }; // Per-descriptor queues. struct descriptor_state { descriptor_state(bool locking) : mutex_(locking) {} friend class kqueue_reactor; friend class object_pool_access; descriptor_state* next_; descriptor_state* prev_; mutex mutex_; int descriptor_; int num_kevents_; // 1 == read only, 2 == read and write op_queue op_queue_[max_ops]; bool shutdown_; }; // Per-descriptor data. typedef descriptor_state* per_descriptor_data; // Constructor. ASIO_DECL kqueue_reactor(asio::execution_context& ctx); // Destructor. ASIO_DECL ~kqueue_reactor(); // Destroy all user-defined handler objects owned by the service. ASIO_DECL void shutdown(); // Recreate internal descriptors following a fork. ASIO_DECL void notify_fork( asio::execution_context::fork_event fork_ev); // Initialise the task. ASIO_DECL void init_task(); // Register a socket with the reactor. Returns 0 on success, system error // code on failure. ASIO_DECL int register_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data); // Register a descriptor with an associated single operation. Returns 0 on // success, system error code on failure. ASIO_DECL int register_internal_descriptor( int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op); // Move descriptor registration from one descriptor_data object to another. ASIO_DECL void move_descriptor(socket_type descriptor, per_descriptor_data& target_descriptor_data, per_descriptor_data& source_descriptor_data); // Post a reactor operation for immediate completion. 
void post_immediate_completion(reactor_op* op, bool is_continuation) { scheduler_.post_immediate_completion(op, is_continuation); } // Start a new operation. The reactor operation will be performed when the // given descriptor is flagged as ready, or an error has occurred. ASIO_DECL void start_op(int op_type, socket_type descriptor, per_descriptor_data& descriptor_data, reactor_op* op, bool is_continuation, bool allow_speculative); // Cancel all operations associated with the given descriptor. The // handlers associated with the descriptor will be invoked with the // operation_aborted error. ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data& descriptor_data); // Cancel any operations that are running against the descriptor and remove // its registration from the reactor. The reactor resources associated with // the descriptor must be released by calling cleanup_descriptor_data. ASIO_DECL void deregister_descriptor(socket_type descriptor, per_descriptor_data& descriptor_data, bool closing); // Remove the descriptor's registration from the reactor. The reactor // resources associated with the descriptor must be released by calling // cleanup_descriptor_data. ASIO_DECL void deregister_internal_descriptor( socket_type descriptor, per_descriptor_data& descriptor_data); // Perform any post-deregistration cleanup tasks associated with the // descriptor data. ASIO_DECL void cleanup_descriptor_data( per_descriptor_data& descriptor_data); // Add a new timer queue to the reactor. template void add_timer_queue(timer_queue& queue); // Remove a timer queue from the reactor. template void remove_timer_queue(timer_queue& queue); // Schedule a new operation in the given timer queue to expire at the // specified absolute time. template void schedule_timer(timer_queue& queue, const typename Time_Traits::time_type& time, typename timer_queue::per_timer_data& timer, wait_op* op); // Cancel the timer operations associated with the given token. 
Returns the // number of operations that have been posted or dispatched. template std::size_t cancel_timer(timer_queue& queue, typename timer_queue::per_timer_data& timer, std::size_t max_cancelled = (std::numeric_limits::max)()); // Move the timer operations associated with the given timer. template void move_timer(timer_queue& queue, typename timer_queue::per_timer_data& target, typename timer_queue::per_timer_data& source); // Run the kqueue loop. ASIO_DECL void run(long usec, op_queue& ops); // Interrupt the kqueue loop. ASIO_DECL void interrupt(); private: // Create the kqueue file descriptor. Throws an exception if the descriptor // cannot be created. ASIO_DECL static int do_kqueue_create(); // Allocate a new descriptor state object. ASIO_DECL descriptor_state* allocate_descriptor_state(); // Free an existing descriptor state object. ASIO_DECL void free_descriptor_state(descriptor_state* s); // Helper function to add a new timer queue. ASIO_DECL void do_add_timer_queue(timer_queue_base& queue); // Helper function to remove a timer queue. ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue); // Get the timeout value for the kevent call. ASIO_DECL timespec* get_timeout(long usec, timespec& ts); // The scheduler used to post completions. scheduler& scheduler_; // Mutex to protect access to internal data. mutex mutex_; // The kqueue file descriptor. int kqueue_fd_; // The interrupter is used to break a blocking kevent call. select_interrupter interrupter_; // The timer queues. timer_queue_set timer_queues_; // Whether the service has been shut down. bool shutdown_; // Mutex to protect access to the registered descriptors. mutex registered_descriptors_mutex_; // Keep track of all registered descriptors. 
object_pool registered_descriptors_; }; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/impl/kqueue_reactor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/detail/impl/kqueue_reactor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // defined(ASIO_HAS_KQUEUE) #endif // ASIO_DETAIL_KQUEUE_REACTOR_HPP galera-4-26.4.25/asio/asio/detail/type_traits.hpp000644 000164 177776 00000005105 15107057155 022725 0ustar00jenkinsnogroup000000 000000 // // detail/type_traits.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DETAIL_TYPE_TRAITS_HPP #define ASIO_DETAIL_TYPE_TRAITS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_STD_TYPE_TRAITS) # include #else // defined(ASIO_HAS_TYPE_TRAITS) # include # include # include # include # include # include # include # include # include # include # include # include # include # include #endif // defined(ASIO_HAS_TYPE_TRAITS) namespace asio { #if defined(ASIO_HAS_STD_TYPE_TRAITS) using std::add_const; using std::conditional; using std::decay; using std::enable_if; using std::false_type; using std::integral_constant; using std::is_base_of; using std::is_class; using std::is_const; using std::is_convertible; using std::is_function; using std::is_same; using std::remove_pointer; using std::remove_reference; #if defined(ASIO_HAS_STD_INVOKE_RESULT) template struct result_of; template struct result_of : std::invoke_result {}; #else // defined(ASIO_HAS_STD_INVOKE_RESULT) using std::result_of; #endif // defined(ASIO_HAS_STD_INVOKE_RESULT) using std::true_type; #else // defined(ASIO_HAS_STD_TYPE_TRAITS) using boost::add_const; template struct 
enable_if : boost::enable_if_c {}; using boost::conditional; using boost::decay; using boost::false_type; using boost::integral_constant; using boost::is_base_of; using boost::is_class; using boost::is_const; using boost::is_convertible; using boost::is_function; using boost::is_same; using boost::remove_pointer; using boost::remove_reference; using boost::result_of; using boost::true_type; #endif // defined(ASIO_HAS_STD_TYPE_TRAITS) } // namespace asio #endif // ASIO_DETAIL_TYPE_TRAITS_HPP galera-4-26.4.25/asio/asio/use_awaitable.hpp000644 000164 177776 00000003767 15107057155 021735 0ustar00jenkinsnogroup000000 000000 // // use_awaitable.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_USE_AWAITABLE_HPP #define ASIO_USE_AWAITABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #include "asio/awaitable.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A completion token that represents the currently executing coroutine. /** * The @c use_awaitable_t class, with its value @c use_awaitable, is used to * represent the currently executing coroutine. This completion token may be * passed as a handler to an asynchronous operation. For example: * * @code awaitable my_coroutine() * { * std::size_t n = co_await my_socket.async_read_some(buffer, use_awaitable); * ... * } @endcode * * When used with co_await, the initiating function (@c async_read_some in the * above example) suspends the current coroutine. The coroutine is resumed when * the asynchronous operation completes, and the result of the operation is * returned. 
*/ template struct use_awaitable_t { ASIO_CONSTEXPR use_awaitable_t() { } }; /// A completion token object that represents the currently executing coroutine. /** * See the documentation for asio::use_awaitable_t for a usage example. */ #if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION) constexpr use_awaitable_t<> use_awaitable; #elif defined(ASIO_MSVC) __declspec(selectany) use_awaitable_t<> use_awaitable; #endif } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/use_awaitable.hpp" #endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_USE_AWAITABLE_HPP galera-4-26.4.25/asio/asio/read_at.hpp000644 000164 177776 00000062525 15107057155 020524 0ustar00jenkinsnogroup000000 000000 // // read_at.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_AT_HPP #define ASIO_READ_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/cstdint.hpp" #include "asio/error.hpp" #if !defined(ASIO_NO_EXTENSIONS) # include "asio/basic_streambuf_fwd.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup read_at asio::read_at * * @brief The @c read_at function is a composed operation that reads a certain * amount of data at the specified offset before returning. */ /*@{*/ /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. 
The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. 
* * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, * asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. 
The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read_at(d, 42, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. 
* * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. 
* * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all()); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::read_at( * d, 42, b, * asio::transfer_all(), ec); @endcode */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, asio::error_code& ec); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. 
* * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition); /// Attempt to read a certain amount of data at the specified offset before /// returning. /** * This function is used to read a certain number of bytes of data from a * random access device at the specified offset. The call will block until one * of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * read_some_at function. * * @param d The device from which the data is to be read. The type must support * the SyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. 
A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's read_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read_at(SyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ /** * @defgroup async_read_at asio::async_read_at * * @brief The @c async_read_at function is a composed asynchronous operation * that reads a certain amount of data at the specified offset. */ /*@{*/ /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. 
* * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * asio::async_read_at(d, 42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. 
* * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * device. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::async_read_at(d, 42, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. 
If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload is equivalent to calling: * @code asio::async_read_at( * d, 42, b, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data at the /// specified offset. /** * This function is used to asynchronously read a certain number of bytes of * data from a random access device at the specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * async_read_some_at function. * * @param d The device from which the data is to be read. The type must support * the AsyncRandomAccessReadDevice concept. * * @param offset The offset at which the data will be read. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some_at operation. 
* const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the device's async_read_some_at function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes copied into the buffers. If an error * // occurred, this will be the number of bytes successfully * // transferred prior to the error. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_at(AsyncRandomAccessReadDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read_at.hpp" #endif // ASIO_READ_AT_HPP galera-4-26.4.25/asio/asio/awaitable.hpp000644 000164 177776 00000005310 15107057155 021043 0ustar00jenkinsnogroup000000 000000 // // awaitable.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_AWAITABLE_HPP #define ASIO_AWAITABLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #include #include "asio/executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { using std::experimental::coroutine_handle; using std::experimental::suspend_always; template class awaitable_thread; template class awaitable_frame; } // namespace detail /// The return type of a coroutine or asynchronous operation. template class awaitable { public: /// The type of the awaited value. typedef T value_type; /// The executor type that will be used for the coroutine. typedef Executor executor_type; /// Default constructor. constexpr awaitable() noexcept : frame_(nullptr) { } /// Move constructor. awaitable(awaitable&& other) noexcept : frame_(std::exchange(other.frame_, nullptr)) { } /// Destructor ~awaitable() { if (frame_) frame_->destroy(); } /// Checks if the awaitable refers to a future result. bool valid() const noexcept { return !!frame_; } #if !defined(GENERATING_DOCUMENTATION) // Support for co_await keyword. bool await_ready() const noexcept { return false; } // Support for co_await keyword. template void await_suspend( detail::coroutine_handle> h) { frame_->push_frame(&h.promise()); } // Support for co_await keyword. T await_resume() { return frame_->get(); } #endif // !defined(GENERATING_DOCUMENTATION) private: template friend class detail::awaitable_thread; template friend class detail::awaitable_frame; // Not copy constructible or copy assignable. awaitable(const awaitable&) = delete; awaitable& operator=(const awaitable&) = delete; // Construct the awaitable from a coroutine's frame object. 
explicit awaitable(detail::awaitable_frame* a) : frame_(a) { } detail::awaitable_frame* frame_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/awaitable.hpp" #endif // defined(ASIO_HAS_CO_AWAIT) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_AWAITABLE_HPP galera-4-26.4.25/asio/asio/buffered_write_stream.hpp000644 000164 177776 00000015706 15107057155 023473 0ustar00jenkinsnogroup000000 000000 // // buffered_write_stream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_WRITE_STREAM_HPP #define ASIO_BUFFERED_WRITE_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/buffered_write_stream_fwd.hpp" #include "asio/buffer.hpp" #include "asio/completion_condition.hpp" #include "asio/detail/bind_handler.hpp" #include "asio/detail/buffered_stream_storage.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/write.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Adds buffering to the write-related operations of a stream. /** * The buffered_write_stream class template can be used to add buffering to the * synchronous and asynchronous write operations of a stream. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class buffered_write_stream : private noncopyable { public: /// The type of the next layer. typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. 
typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the executor associated with the object. typedef typename lowest_layer_type::executor_type executor_type; #if defined(GENERATING_DOCUMENTATION) /// The default buffer size. static const std::size_t default_buffer_size = implementation_defined; #else ASIO_STATIC_CONSTANT(std::size_t, default_buffer_size = 1024); #endif /// Construct, passing the specified argument to initialise the next layer. template explicit buffered_write_stream(Arg& a) : next_layer_(a), storage_(default_buffer_size) { } /// Construct, passing the specified argument to initialise the next layer. template buffered_write_stream(Arg& a, std::size_t buffer_size) : next_layer_(a), storage_(buffer_size) { } /// Get a reference to the next layer. next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a const reference to the lowest layer. const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return next_layer_.lowest_layer().get_executor(); } /// Close the stream. void close() { next_layer_.close(); } /// Close the stream. ASIO_SYNC_OP_VOID close(asio::error_code& ec) { next_layer_.close(ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation. Throws an /// exception on failure. std::size_t flush(); /// Flush all data from the buffer to the next layer. Returns the number of /// bytes written to the next layer on the last write operation, or 0 if an /// error occurred. std::size_t flush(asio::error_code& ec); /// Start an asynchronous flush. 
template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_flush(ASIO_MOVE_ARG(WriteHandler) handler); /// Write the given data to the stream. Returns the number of bytes written. /// Throws an exception on failure. template std::size_t write_some(const ConstBufferSequence& buffers); /// Write the given data to the stream. Returns the number of bytes written, /// or 0 if an error occurred and the error handler did not throw. template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec); /// Start an asynchronous write. The data being written must be valid for the /// lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler); /// Read some data from the stream. Returns the number of bytes read. Throws /// an exception on failure. template std::size_t read_some(const MutableBufferSequence& buffers) { return next_layer_.read_some(buffers); } /// Read some data from the stream. Returns the number of bytes read or 0 if /// an error occurred. template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return next_layer_.read_some(buffers, ec); } /// Start an asynchronous read. The buffer into which the data will be read /// must be valid for the lifetime of the asynchronous operation. template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return next_layer_.async_read_some(buffers, ASIO_MOVE_CAST(ReadHandler)(handler)); } /// Peek at the incoming data on the stream. Returns the number of bytes read. /// Throws an exception on failure. template std::size_t peek(const MutableBufferSequence& buffers) { return next_layer_.peek(buffers); } /// Peek at the incoming data on the stream. 
Returns the number of bytes read, /// or 0 if an error occurred. template std::size_t peek(const MutableBufferSequence& buffers, asio::error_code& ec) { return next_layer_.peek(buffers, ec); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail() { return next_layer_.in_avail(); } /// Determine the amount of data that may be read without blocking. std::size_t in_avail(asio::error_code& ec) { return next_layer_.in_avail(ec); } private: /// Copy data into the internal buffer from the specified source buffer. /// Returns the number of bytes copied. template std::size_t copy(const ConstBufferSequence& buffers); /// The next layer. Stream next_layer_; // The data in the buffer. detail::buffered_stream_storage storage_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/buffered_write_stream.hpp" #endif // ASIO_BUFFERED_WRITE_STREAM_HPP galera-4-26.4.25/asio/asio/bind_executor.hpp000644 000164 177776 00000037415 15107057155 021757 0ustar00jenkinsnogroup000000 000000 // // bind_executor.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BIND_EXECUTOR_HPP #define ASIO_BIND_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/associated_executor.hpp" #include "asio/associated_allocator.hpp" #include "asio/async_result.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/uses_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template struct executor_binder_check { typedef void type; }; // Helper to automatically define nested typedef result_type. template struct executor_binder_result_type { protected: typedef void result_type_or_void; }; template struct executor_binder_result_type::type> { typedef typename T::result_type result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; template struct executor_binder_result_type { typedef R result_type; protected: typedef result_type result_type_or_void; }; // Helper to automatically define nested typedef argument_type. 
template struct executor_binder_argument_type {}; template struct executor_binder_argument_type::type> { typedef typename T::argument_type argument_type; }; template struct executor_binder_argument_type { typedef A1 argument_type; }; template struct executor_binder_argument_type { typedef A1 argument_type; }; // Helper to automatically define nested typedefs first_argument_type and // second_argument_type. template struct executor_binder_argument_types {}; template struct executor_binder_argument_types::type> { typedef typename T::first_argument_type first_argument_type; typedef typename T::second_argument_type second_argument_type; }; template struct executor_binder_argument_type { typedef A1 first_argument_type; typedef A2 second_argument_type; }; template struct executor_binder_argument_type { typedef A1 first_argument_type; typedef A2 second_argument_type; }; // Helper to: // - Apply the empty base optimisation to the executor. // - Perform uses_executor construction of the target type, if required. template class executor_binder_base; template class executor_binder_base : protected Executor { protected: template executor_binder_base(ASIO_MOVE_ARG(E) e, ASIO_MOVE_ARG(U) u) : executor_(ASIO_MOVE_CAST(E)(e)), target_(executor_arg_t(), executor_, ASIO_MOVE_CAST(U)(u)) { } Executor executor_; T target_; }; template class executor_binder_base { protected: template executor_binder_base(ASIO_MOVE_ARG(E) e, ASIO_MOVE_ARG(U) u) : executor_(ASIO_MOVE_CAST(E)(e)), target_(ASIO_MOVE_CAST(U)(u)) { } Executor executor_; T target_; }; // Helper to enable SFINAE on zero-argument operator() below. template struct executor_binder_result_of0 { typedef void type; }; template struct executor_binder_result_of0::type>::type> { typedef typename result_of::type type; }; } // namespace detail /// A call wrapper type to bind an executor of type @c Executor to an object of /// type @c T. 
template class executor_binder #if !defined(GENERATING_DOCUMENTATION) : public detail::executor_binder_result_type, public detail::executor_binder_argument_type, public detail::executor_binder_argument_types, private detail::executor_binder_base< T, Executor, uses_executor::value> #endif // !defined(GENERATING_DOCUMENTATION) { public: /// The type of the target object. typedef T target_type; /// The type of the associated executor. typedef Executor executor_type; #if defined(GENERATING_DOCUMENTATION) /// The return type if a function. /** * The type of @c result_type is based on the type @c T of the wrapper's * target object: * * @li if @c T is a pointer to function type, @c result_type is a synonym for * the return type of @c T; * * @li if @c T is a class type with a member type @c result_type, then @c * result_type is a synonym for @c T::result_type; * * @li otherwise @c result_type is not defined. */ typedef see_below result_type; /// The type of the function's argument. /** * The type of @c argument_type is based on the type @c T of the wrapper's * target object: * * @li if @c T is a pointer to a function type accepting a single argument, * @c argument_type is a synonym for the return type of @c T; * * @li if @c T is a class type with a member type @c argument_type, then @c * argument_type is a synonym for @c T::argument_type; * * @li otherwise @c argument_type is not defined. */ typedef see_below argument_type; /// The type of the function's first argument. /** * The type of @c first_argument_type is based on the type @c T of the * wrapper's target object: * * @li if @c T is a pointer to a function type accepting two arguments, @c * first_argument_type is a synonym for the return type of @c T; * * @li if @c T is a class type with a member type @c first_argument_type, * then @c first_argument_type is a synonym for @c T::first_argument_type; * * @li otherwise @c first_argument_type is not defined. 
*/ typedef see_below first_argument_type; /// The type of the function's second argument. /** * The type of @c second_argument_type is based on the type @c T of the * wrapper's target object: * * @li if @c T is a pointer to a function type accepting two arguments, @c * second_argument_type is a synonym for the return type of @c T; * * @li if @c T is a class type with a member type @c first_argument_type, * then @c second_argument_type is a synonym for @c T::second_argument_type; * * @li otherwise @c second_argument_type is not defined. */ typedef see_below second_argument_type; #endif // defined(GENERATING_DOCUMENTATION) /// Construct an executor wrapper for the specified object. /** * This constructor is only valid if the type @c T is constructible from type * @c U. */ template executor_binder(executor_arg_t, const executor_type& e, ASIO_MOVE_ARG(U) u) : base_type(e, ASIO_MOVE_CAST(U)(u)) { } /// Copy constructor. executor_binder(const executor_binder& other) : base_type(other.get_executor(), other.get()) { } /// Construct a copy, but specify a different executor. executor_binder(executor_arg_t, const executor_type& e, const executor_binder& other) : base_type(e, other.get()) { } /// Construct a copy of a different executor wrapper type. /** * This constructor is only valid if the @c Executor type is constructible * from type @c OtherExecutor, and the type @c T is constructible from type * @c U. */ template executor_binder(const executor_binder& other) : base_type(other.get_executor(), other.get()) { } /// Construct a copy of a different executor wrapper type, but specify a /// different executor. /** * This constructor is only valid if the type @c T is constructible from type * @c U. */ template executor_binder(executor_arg_t, const executor_type& e, const executor_binder& other) : base_type(e, other.get()) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. 
executor_binder(executor_binder&& other) : base_type(ASIO_MOVE_CAST(executor_type)(other.get_executor()), ASIO_MOVE_CAST(T)(other.get())) { } /// Move construct the target object, but specify a different executor. executor_binder(executor_arg_t, const executor_type& e, executor_binder&& other) : base_type(e, ASIO_MOVE_CAST(T)(other.get())) { } /// Move construct from a different executor wrapper type. template executor_binder(executor_binder&& other) : base_type(ASIO_MOVE_CAST(OtherExecutor)(other.get_executor()), ASIO_MOVE_CAST(U)(other.get())) { } /// Move construct from a different executor wrapper type, but specify a /// different executor. template executor_binder(executor_arg_t, const executor_type& e, executor_binder&& other) : base_type(e, ASIO_MOVE_CAST(U)(other.get())) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. ~executor_binder() { } /// Obtain a reference to the target object. target_type& get() ASIO_NOEXCEPT { return this->target_; } /// Obtain a reference to the target object. const target_type& get() const ASIO_NOEXCEPT { return this->target_; } /// Obtain the associated executor. executor_type get_executor() const ASIO_NOEXCEPT { return this->executor_; } #if defined(GENERATING_DOCUMENTATION) template auto operator()(Args&& ...); template auto operator()(Args&& ...) const; #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) /// Forwarding function call operator. template typename result_of::type operator()( ASIO_MOVE_ARG(Args)... args) { return this->target_(ASIO_MOVE_CAST(Args)(args)...); } /// Forwarding function call operator. template typename result_of::type operator()( ASIO_MOVE_ARG(Args)... 
args) const { return this->target_(ASIO_MOVE_CAST(Args)(args)...); } #elif defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER) typename detail::executor_binder_result_of0::type operator()() { return this->target_(); } typename detail::executor_binder_result_of0::type operator()() const { return this->target_(); } #define ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF(n) \ template \ typename result_of::type operator()( \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template \ typename result_of::type operator()( \ ASIO_VARIADIC_MOVE_PARAMS(n)) const \ { \ return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF) #undef ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF #else // defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER) typedef typename detail::executor_binder_result_type::result_type_or_void result_type_or_void; result_type_or_void operator()() { return this->target_(); } result_type_or_void operator()() const { return this->target_(); } #define ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF(n) \ template \ result_type_or_void operator()( \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template \ result_type_or_void operator()( \ ASIO_VARIADIC_MOVE_PARAMS(n)) const \ { \ return this->target_(ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF) #undef ASIO_PRIVATE_BIND_EXECUTOR_CALL_DEF #endif // defined(ASIO_HAS_STD_TYPE_TRAITS) && !defined(_MSC_VER) private: typedef detail::executor_binder_base::value> base_type; }; /// Associate an object of type @c T with an executor of type @c Executor. 
template inline executor_binder::type, Executor> bind_executor(const Executor& ex, ASIO_MOVE_ARG(T) t, typename enable_if::value>::type* = 0) { return executor_binder::type, Executor>( executor_arg_t(), ex, ASIO_MOVE_CAST(T)(t)); } /// Associate an object of type @c T with an execution context's executor. template inline executor_binder::type, typename ExecutionContext::executor_type> bind_executor(ExecutionContext& ctx, ASIO_MOVE_ARG(T) t, typename enable_if::value>::type* = 0) { return executor_binder::type, typename ExecutionContext::executor_type>( executor_arg_t(), ctx.get_executor(), ASIO_MOVE_CAST(T)(t)); } #if !defined(GENERATING_DOCUMENTATION) template struct uses_executor, Executor> : true_type {}; template class async_result, Signature> { public: typedef executor_binder< typename async_result::completion_handler_type, Executor> completion_handler_type; typedef typename async_result::return_type return_type; explicit async_result(executor_binder& b) : target_(b.get()) { } return_type get() { return target_.get(); } private: async_result(const async_result&) ASIO_DELETED; async_result& operator=(const async_result&) ASIO_DELETED; async_result target_; }; template struct associated_allocator, Allocator> { typedef typename associated_allocator::type type; static type get(const executor_binder& b, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(b.get(), a); } }; template struct associated_executor, Executor1> { typedef Executor type; static type get(const executor_binder& b, const Executor1& = Executor1()) ASIO_NOEXCEPT { return b.get_executor(); } }; #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BIND_EXECUTOR_HPP galera-4-26.4.25/asio/asio/defer.hpp000644 000164 177776 00000010465 15107057155 020206 0ustar00jenkinsnogroup000000 000000 // // defer.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DEFER_HPP #define ASIO_DEFER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the object's associated * executor. The function object is queued for execution, and is never called * from the current thread prior to returning from defer(). * * The use of @c defer(), rather than @ref post(), indicates the caller's * preference that the executor defer the queueing of the function object. This * may allow the executor to optimise queueing for cases when the function * object represents a continuation of the current call context. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). * * @li Obtains the handler's associated executor object @c ex by performing * get_associated_executor(handler). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Performs ex.defer(std::move(handler), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( ASIO_MOVE_ARG(CompletionToken) token); /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the specified executor. 
* The function object is queued for execution, and is never called from the * current thread prior to returning from defer(). * * The use of @c defer(), rather than @ref post(), indicates the caller's * preference that the executor defer the queueing of the function object. This * may allow the executor to optimise queueing for cases when the function * object represents a continuation of the current call context. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). * * @li Obtains the handler's associated executor object @c ex1 by performing * get_associated_executor(handler). * * @li Creates a work object @c w by performing make_work(ex1). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Constructs a function object @c f with a function call operator that * performs ex1.dispatch(std::move(handler), alloc) followed by * w.reset(). * * @li Performs Executor(ex).defer(std::move(f), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); /// Submits a completion token or function object for execution. /** * @returns defer(ctx.get_executor(), forward(token)). 
*/ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) defer( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/defer.hpp" #endif // ASIO_DEFER_HPP galera-4-26.4.25/asio/asio/steady_timer.hpp000644 000164 177776 00000002303 15107057155 021602 0ustar00jenkinsnogroup000000 000000 // // steady_timer.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STEADY_TIMER_HPP #define ASIO_STEADY_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #include "asio/basic_waitable_timer.hpp" #include "asio/detail/chrono.hpp" namespace asio { /// Typedef for a timer based on the steady clock. /** * This typedef uses the C++11 @c <chrono> standard library facility, if * available. Otherwise, it may use the Boost.Chrono library. To explicitly * utilise Boost.Chrono, use the basic_waitable_timer template directly: * @code * typedef basic_waitable_timer timer; * @endcode */ typedef basic_waitable_timer steady_timer; } // namespace asio #endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_STEADY_TIMER_HPP galera-4-26.4.25/asio/asio/windows/000755 000164 177776 00000000000 15107057160 020070 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/windows/basic_random_access_handle.hpp000644 000164 177776 00000037773 15107057155 026103 0ustar00jenkinsnogroup000000 000000 // // windows/basic_random_access_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP #define ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/windows/basic_overlapped_handle.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides random-access handle functionality. /** * The windows::basic_random_access_handle class provides asynchronous and * blocking random-access handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_random_access_handle : public basic_overlapped_handle { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a handle. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef asio::detail::win_iocp_handle_service::native_handle_type native_handle_type; #endif /// Construct a random-access handle without opening it. /** * This constructor creates a random-access handle without opening it. * * @param ex The I/O executor that the random-access handle will use, by * default, to dispatch handlers for any asynchronous operations performed on * the random-access handle. */ explicit basic_random_access_handle(const executor_type& ex) : basic_overlapped_handle(ex) { } /// Construct a random-access handle without opening it. /** * This constructor creates a random-access handle without opening it. The * handle needs to be opened or assigned before data can be sent or received * on it. 
* * @param context An execution context which provides the I/O executor that * the random-access handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the random-access handle. */ template explicit basic_random_access_handle(ExecutionContext& context, typename enable_if< is_convertible::value, basic_random_access_handle >::type* = 0) : basic_overlapped_handle(context) { } /// Construct a random-access handle on an existing native handle. /** * This constructor creates a random-access handle object to hold an existing * native handle. * * @param ex The I/O executor that the random-access handle will use, by * default, to dispatch handlers for any asynchronous operations performed on * the random-access handle. * * @param handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_random_access_handle(const executor_type& ex, const native_handle_type& handle) : basic_overlapped_handle(ex, handle) { } /// Construct a random-access handle on an existing native handle. /** * This constructor creates a random-access handle object to hold an existing * native handle. * * @param context An execution context which provides the I/O executor that * the random-access handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the random-access handle. * * @param handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ template basic_random_access_handle(ExecutionContext& context, const native_handle_type& handle, typename enable_if< is_convertible::value >::type* = 0) : basic_overlapped_handle(context, handle) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a random-access handle from another. /** * This constructor moves a random-access handle from one object to another. * * @param other The other random-access handle object from which the * move will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_random_access_handle(const executor_type&) * constructor. */ basic_random_access_handle(basic_random_access_handle&& other) : basic_overlapped_handle(std::move(other)) { } /// Move-assign a random-access handle from another. /** * This assignment operator moves a random-access handle from one object to * another. * * @param other The other random-access handle object from which the * move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_random_access_handle(const executor_type&) * constructor. */ basic_random_access_handle& operator=(basic_random_access_handle&& other) { basic_overlapped_handle::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Write some data to the handle at the specified offset. /** * This function is used to write data to the random-access handle. The * function call will block until one or more bytes of the data has been * written successfully, or until an error occurs. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some_at operation may not write all of the data. Consider * using the @ref write_at function if you need to ensure that all data is * written before the blocking operation completes. 
* * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.write_some_at(42, asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some_at(uint64_t offset, const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().write_some_at( this->impl_.get_implementation(), offset, buffers, ec); asio::detail::throw_error(ec, "write_some_at"); return s; } /// Write some data to the handle at the specified offset. /** * This function is used to write data to the random-access handle. The * function call will block until one or more bytes of the data has been * written successfully, or until an error occurs. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write_at function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some_at(uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().write_some_at( this->impl_.get_implementation(), offset, buffers, ec); } /// Start an asynchronous write at the specified offset. /** * This function is used to asynchronously write data to the random-access * handle. The function call always returns immediately. * * @param offset The offset at which the data will be written. * * @param buffers One or more data buffers to be written to the handle. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write_at function if you need to ensure that * all data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.async_write_some_at(42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some_at(uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; asio::async_completion init(handler); this->impl_.get_service().async_write_some_at( this->impl_.get_implementation(), offset, buffers, init.completion_handler, this->impl_.get_implementation_executor()); return init.result.get(); } /// Read some data from the handle at the specified offset. /** * This function is used to read data from the random-access handle. The * function call will block until one or more bytes of data has been read * successfully, or until an error occurs. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read_at function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.read_some_at(42, asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some_at(uint64_t offset, const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().read_some_at( this->impl_.get_implementation(), offset, buffers, ec); asio::detail::throw_error(ec, "read_some_at"); return s; } /// Read some data from the handle at the specified offset. /** * This function is used to read data from the random-access handle. The * function call will block until one or more bytes of data has been read * successfully, or until an error occurs. 
* * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read_at function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some_at(uint64_t offset, const MutableBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().read_some_at( this->impl_.get_implementation(), offset, buffers, ec); } /// Start an asynchronous read at the specified offset. /** * This function is used to asynchronously read data from the random-access * handle. The function call always returns immediately. * * @param offset The offset at which the data will be read. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The read operation may not read all of the requested number of bytes. 
* Consider using the @ref async_read_at function if you need to ensure that * the requested amount of data is read before the asynchronous operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.async_read_some_at(42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some_at(uint64_t offset, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; asio::async_completion init(handler); this->impl_.get_service().async_read_some_at( this->impl_.get_implementation(), offset, buffers, init.completion_handler, this->impl_.get_implementation_executor()); return init.result.get(); } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_RANDOM_ACCESS_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/stream_handle.hpp000644 000164 177776 00000001741 15107057155 023416 0ustar00jenkinsnogroup000000 000000 // // windows/stream_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_STREAM_HANDLE_HPP #define ASIO_WINDOWS_STREAM_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_stream_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of a stream-oriented handle. typedef basic_stream_handle<> stream_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_STREAM_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/overlapped_handle.hpp000644 000164 177776 00000002161 15107057155 024261 0ustar00jenkinsnogroup000000 000000 // // windows/overlapped_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP #define ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_overlapped_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of an overlapped handle. 
typedef basic_overlapped_handle<> overlapped_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OVERLAPPED_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/overlapped_ptr.hpp000644 000164 177776 00000007326 15107057155 023643 0ustar00jenkinsnogroup000000 000000 // // windows/overlapped_ptr.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OVERLAPPED_PTR_HPP #define ASIO_WINDOWS_OVERLAPPED_PTR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/noncopyable.hpp" #include "asio/detail/win_iocp_overlapped_ptr.hpp" #include "asio/io_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Wraps a handler to create an OVERLAPPED object for use with overlapped I/O. /** * A special-purpose smart pointer used to wrap an application handler so that * it can be passed as the LPOVERLAPPED argument to overlapped I/O functions. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class overlapped_ptr : private noncopyable { public: /// Construct an empty overlapped_ptr. overlapped_ptr() : impl_() { } /// Construct an overlapped_ptr to contain the specified handler. 
template explicit overlapped_ptr(ExecutionContext& context, ASIO_MOVE_ARG(Handler) handler, typename enable_if< is_convertible::value >::type* = 0) : impl_(context.get_executor(), ASIO_MOVE_CAST(Handler)(handler)) { } /// Construct an overlapped_ptr to contain the specified handler. template explicit overlapped_ptr(const Executor& ex, ASIO_MOVE_ARG(Handler) handler, typename enable_if< is_executor::value >::type* = 0) : impl_(ex, ASIO_MOVE_CAST(Handler)(handler)) { } /// Destructor automatically frees the OVERLAPPED object unless released. ~overlapped_ptr() { } /// Reset to empty. void reset() { impl_.reset(); } /// Reset to contain the specified handler, freeing any current OVERLAPPED /// object. template void reset(ExecutionContext& context, ASIO_MOVE_ARG(Handler) handler, typename enable_if< is_convertible::value >::type* = 0) { impl_.reset(context.get_executor(), ASIO_MOVE_CAST(Handler)(handler)); } /// Reset to contain the specified handler, freeing any current OVERLAPPED /// object. template void reset(const Executor& ex, ASIO_MOVE_ARG(Handler) handler, typename enable_if< is_executor::value >::type* = 0) { impl_.reset(ex, ASIO_MOVE_CAST(Handler)(handler)); } /// Get the contained OVERLAPPED object. OVERLAPPED* get() { return impl_.get(); } /// Get the contained OVERLAPPED object. const OVERLAPPED* get() const { return impl_.get(); } /// Release ownership of the OVERLAPPED object. OVERLAPPED* release() { return impl_.release(); } /// Post completion notification for overlapped operation. Releases ownership. 
void complete(const asio::error_code& ec, std::size_t bytes_transferred) { impl_.complete(ec, bytes_transferred); } private: detail::win_iocp_overlapped_ptr impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OVERLAPPED_PTR) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OVERLAPPED_PTR_HPP galera-4-26.4.25/asio/asio/windows/basic_object_handle.hpp000644 000164 177776 00000030664 15107057155 024540 0ustar00jenkinsnogroup000000 000000 // // windows/basic_object_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP #define ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/async_result.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/win_object_handle_service.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides object-oriented handle functionality. /** * The windows::basic_object_handle class provides asynchronous and blocking * object-oriented handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_object_handle { public: /// The type of the executor associated with the object. 
typedef Executor executor_type; /// The native representation of a handle. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef asio::detail::win_object_handle_service::native_handle_type native_handle_type; #endif /// An object handle is always the lowest layer. typedef basic_object_handle lowest_layer_type; /// Construct an object handle without opening it. /** * This constructor creates an object handle without opening it. * * @param ex The I/O executor that the object handle will use, by default, to * dispatch handlers for any asynchronous operations performed on the * object handle. */ explicit basic_object_handle(const executor_type& ex) : impl_(ex) { } /// Construct an object handle without opening it. /** * This constructor creates an object handle without opening it. * * @param context An execution context which provides the I/O executor that * the object handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the object handle. */ template explicit basic_object_handle(ExecutionContext& context, typename enable_if< is_convertible::value, basic_object_handle >::type* = 0) : impl_(context) { } /// Construct an object handle on an existing native handle. /** * This constructor creates an object handle object to hold an existing native * handle. * * @param ex The I/O executor that the object handle will use, by default, to * dispatch handlers for any asynchronous operations performed on the * object handle. * * @param native_handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_object_handle(const executor_type& ex, const native_handle_type& native_handle) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_handle, ec); asio::detail::throw_error(ec, "assign"); } /// Construct an object handle on an existing native handle. 
/** * This constructor creates an object handle object to hold an existing native * handle. * * @param context An execution context which provides the I/O executor that * the object handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the object handle. * * @param native_handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ template basic_object_handle(ExecutionContext& context, const native_handle_type& native_handle, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_handle, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct an object handle from another. /** * This constructor moves an object handle from one object to another. * * @param other The other object handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_object_handle(const executor_type&) * constructor. */ basic_object_handle(basic_object_handle&& other) : impl_(std::move(other.impl_)) { } /// Move-assign an object handle from another. /** * This assignment operator moves an object handle from one object to another. * * @param other The other object handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_object_handle(const executor_type&) * constructor. */ basic_object_handle& operator=(basic_object_handle&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Get a reference to the lowest layer. 
/** * This function returns a reference to the lowest layer in a stack of * layers. Since an object handle cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since an object handle cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @throws asio::system_error Thrown on failure. */ void assign(const native_handle_type& handle) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), handle, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const native_handle_type& handle, asio::error_code& ec) { impl_.get_service().assign(impl_.get_implementation(), handle, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the handle is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. 
*/ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the native handle representation. /** * This function may be used to obtain the underlying representation of the * handle. This is intended to allow access to native handle functionality * that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform a blocking wait on the object handle. /** * This function is used to wait for the object handle to be set to the * signalled state. 
This function blocks and does not return until the object * handle has been set to the signalled state. * * @throws asio::system_error Thrown on failure. */ void wait() { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the object handle. /** * This function is used to wait for the object handle to be set to the * signalled state. This function blocks and does not return until the object * handle has been set to the signalled state. * * @param ec Set to indicate what error occurred, if any. */ void wait(asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), ec); } /// Start an asynchronous wait on the object handle. /** * This function is be used to initiate an asynchronous wait against the * object handle. It always returns immediately. * * @param handler The handler to be called when the object handle is set to * the signalled state. Copies will be made of the handler as required. The * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { asio::async_completion init(handler); impl_.get_service().async_wait(impl_.get_implementation(), init.completion_handler, impl_.get_implementation_executor()); return init.result.get(); } private: // Disallow copying and assignment. 
basic_object_handle(const basic_object_handle&) ASIO_DELETED; basic_object_handle& operator=(const basic_object_handle&) ASIO_DELETED; asio::detail::io_object_impl< asio::detail::win_object_handle_service, Executor> impl_; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_OBJECT_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/random_access_handle.hpp000644 000164 177776 00000002045 15107057155 024722 0ustar00jenkinsnogroup000000 000000 // // windows/random_access_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP #define ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_random_access_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of a random-access handle. typedef basic_random_access_handle<> random_access_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_RANDOM_ACCESS_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/basic_stream_handle.hpp000644 000164 177776 00000035736 15107057155 024572 0ustar00jenkinsnogroup000000 000000 // // windows/basic_stream_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP #define ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/windows/basic_overlapped_handle.hpp" #if defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides stream-oriented handle functionality. /** * The windows::basic_stream_handle class provides asynchronous and blocking * stream-oriented handle functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class basic_stream_handle : public basic_overlapped_handle { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a handle. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef asio::detail::win_iocp_handle_service::native_handle_type native_handle_type; #endif /// Construct a stream handle without opening it. /** * This constructor creates a stream handle without opening it. * * @param ex The I/O executor that the stream handle will use, by default, to * dispatch handlers for any asynchronous operations performed on the stream * handle. */ explicit basic_stream_handle(const executor_type& ex) : basic_overlapped_handle(ex) { } /// Construct a stream handle without opening it. /** * This constructor creates a stream handle without opening it. The handle * needs to be opened or assigned before data can be sent or received on it. 
* * @param context An execution context which provides the I/O executor that * the stream handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the stream handle. */ template explicit basic_stream_handle(ExecutionContext& context, typename enable_if< is_convertible::value, basic_stream_handle >::type* = 0) : basic_overlapped_handle(context) { } /// Construct a stream handle on an existing native handle. /** * This constructor creates a stream handle object to hold an existing native * handle. * * @param ex The I/O executor that the stream handle will use, by default, to * dispatch handlers for any asynchronous operations performed on the stream * handle. * * @param handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_stream_handle(const executor_type& ex, const native_handle_type& handle) : basic_overlapped_handle(ex, handle) { } /// Construct a stream handle on an existing native handle. /** * This constructor creates a stream handle object to hold an existing native * handle. * * @param context An execution context which provides the I/O executor that * the stream handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the stream handle. * * @param handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ template basic_stream_handle(ExecutionContext& context, const native_handle_type& handle, typename enable_if< is_convertible::value >::type* = 0) : basic_overlapped_handle(context, handle) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a stream handle from another. /** * This constructor moves a stream handle from one object to another. * * @param other The other stream handle object from which the move * will occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_handle(const executor_type&) * constructor. */ basic_stream_handle(basic_stream_handle&& other) : basic_overlapped_handle(std::move(other)) { } /// Move-assign a stream handle from another. /** * This assignment operator moves a stream handle from one object to * another. * * @param other The other stream handle object from which the move will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_stream_handle(const executor_type&) * constructor. */ basic_stream_handle& operator=(basic_stream_handle&& other) { basic_overlapped_handle::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Write some data to the handle. /** * This function is used to write data to the stream handle. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the handle. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().write_some( this->impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the handle. /** * This function is used to write data to the stream handle. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the handle. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().write_some( this->impl_.get_implementation(), buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the stream handle. * The function call always returns immediately. * * @param buffers One or more data buffers to be written to the handle. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * handle.async_write_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; asio::async_completion init(handler); this->impl_.get_service().async_write_some( this->impl_.get_implementation(), buffers, init.completion_handler, this->impl_.get_implementation_executor()); return init.result.get(); } /// Read some data from the handle. /** * This function is used to read data from the stream handle. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = this->impl_.get_service().read_some( this->impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the handle. /** * This function is used to read data from the stream handle. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return this->impl_.get_service().read_some( this->impl_.get_implementation(), buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read data from the stream handle. * The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. 
The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * handle.async_read_some(asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { // If you get an error on the following line it means that your handler does // not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; asio::async_completion init(handler); this->impl_.get_service().async_read_some( this->impl_.get_implementation(), buffers, init.completion_handler, this->impl_.get_implementation_executor()); return init.result.get(); } }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_STREAM_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/object_handle.hpp000644 000164 177776 00000002024 15107057155 023364 0ustar00jenkinsnogroup000000 000000 // // windows/object_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2011 Boris Schaeling (boris@highscore.de) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_OBJECT_HANDLE_HPP #define ASIO_WINDOWS_OBJECT_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include "asio/windows/basic_object_handle.hpp" namespace asio { namespace windows { /// Typedef for the typical usage of an object handle. typedef basic_object_handle<> object_handle; } // namespace windows } // namespace asio #endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_OBJECT_HANDLE_HPP galera-4-26.4.25/asio/asio/windows/basic_overlapped_handle.hpp000644 000164 177776 00000025723 15107057155 025433 0ustar00jenkinsnogroup000000 000000 // // windows/basic_overlapped_handle.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP #define ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) \ || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/async_result.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/win_iocp_handle_service.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { namespace windows { /// Provides Windows handle functionality for objects that support /// overlapped I/O. /** * The windows::overlapped_handle class provides the ability to wrap a Windows * handle. The underlying object referred to by the handle must support * overlapped I/O. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_overlapped_handle { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a handle. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #else typedef asio::detail::win_iocp_handle_service::native_handle_type native_handle_type; #endif /// An overlapped_handle is always the lowest layer. typedef basic_overlapped_handle lowest_layer_type; /// Construct an overlapped handle without opening it. /** * This constructor creates an overlapped handle without opening it. 
* * @param ex The I/O executor that the overlapped handle will use, by default, * to dispatch handlers for any asynchronous operations performed on the * overlapped handle. */ explicit basic_overlapped_handle(const executor_type& ex) : impl_(ex) { } /// Construct an overlapped handle without opening it. /** * This constructor creates an overlapped handle without opening it. * * @param context An execution context which provides the I/O executor that * the overlapped handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the overlapped handle. */ template explicit basic_overlapped_handle(ExecutionContext& context, typename enable_if< is_convertible::value, basic_overlapped_handle >::type* = 0) : impl_(context) { } /// Construct an overlapped handle on an existing native handle. /** * This constructor creates an overlapped handle object to hold an existing * native handle. * * @param ex The I/O executor that the overlapped handle will use, by default, * to dispatch handlers for any asynchronous operations performed on the * overlapped handle. * * @param native_handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. */ basic_overlapped_handle(const executor_type& ex, const native_handle_type& native_handle) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_handle, ec); asio::detail::throw_error(ec, "assign"); } /// Construct an overlapped handle on an existing native handle. /** * This constructor creates an overlapped handle object to hold an existing * native handle. * * @param context An execution context which provides the I/O executor that * the overlapped handle will use, by default, to dispatch handlers for any * asynchronous operations performed on the overlapped handle. * * @param native_handle The new underlying handle implementation. * * @throws asio::system_error Thrown on failure. 
*/ template basic_overlapped_handle(ExecutionContext& context, const native_handle_type& native_handle, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_handle, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct an overlapped handle from another. /** * This constructor moves a handle from one object to another. * * @param other The other overlapped handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c overlapped_handle(const executor_type&) * constructor. */ basic_overlapped_handle(basic_overlapped_handle&& other) : impl_(std::move(other.impl_)) { } /// Move-assign an overlapped handle from another. /** * This assignment operator moves a handle from one object to another. * * @param other The other overlapped handle object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c overlapped_handle(const executor_type&) * constructor. */ basic_overlapped_handle& operator=(basic_overlapped_handle&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since an overlapped_handle cannot contain any further layers, it * simply returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. 
/** * This function returns a const reference to the lowest layer in a stack of * layers. Since an overlapped_handle cannot contain any further layers, it * simply returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return *this; } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @throws asio::system_error Thrown on failure. */ void assign(const native_handle_type& handle) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), handle, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native handle to the handle. /* * This function opens the handle to hold an existing native handle. * * @param handle A native handle. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const native_handle_type& handle, asio::error_code& ec) { impl_.get_service().assign(impl_.get_implementation(), handle, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the handle is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the handle. /** * This function is used to close the handle. Any asynchronous read or write * operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. 
*/ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the native handle representation. /** * This function may be used to obtain the underlying representation of the * handle. This is intended to allow access to native handle functionality * that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the handle. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } protected: /// Protected destructor to prevent deletion through this type. /** * This function destroys the handle, cancelling any outstanding asynchronous * wait operations associated with the handle as if by calling @c cancel. */ ~basic_overlapped_handle() { } asio::detail::io_object_impl< asio::detail::win_iocp_handle_service, Executor> impl_; private: // Disallow copying and assignment. 
basic_overlapped_handle(const basic_overlapped_handle&) ASIO_DELETED; basic_overlapped_handle& operator=( const basic_overlapped_handle&) ASIO_DELETED; }; } // namespace windows } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE) // || defined(ASIO_HAS_WINDOWS_STREAM_HANDLE) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_WINDOWS_BASIC_OVERLAPPED_HANDLE_HPP galera-4-26.4.25/asio/asio/basic_socket_streambuf.hpp000644 000164 177776 00000051004 15107057155 023614 0ustar00jenkinsnogroup000000 000000 // // basic_socket_streambuf.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_STREAMBUF_HPP #define ASIO_BASIC_SOCKET_STREAMBUF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include #include "asio/basic_socket.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/throw_error.hpp" #include "asio/io_context.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) \ && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) # include "asio/detail/deadline_timer_service.hpp" #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) # include "asio/steady_timer.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # include "asio/detail/variadic_templates.hpp" // A macro that should expand to: // template // basic_socket_streambuf* connect(T1 x1, ..., Tn xn) // { // init_buffers(); // typedef typename 
Protocol::resolver resolver_type; // resolver_type resolver(socket().get_executor()); // connect_to_endpoints( // resolver.resolve(x1, ..., xn, ec_)); // return !ec_ ? this : 0; // } // This macro should only persist within this file. # define ASIO_PRIVATE_CONNECT_DEF(n) \ template \ basic_socket_streambuf* connect(ASIO_VARIADIC_BYVAL_PARAMS(n)) \ { \ init_buffers(); \ typedef typename Protocol::resolver resolver_type; \ resolver_type resolver(socket().get_executor()); \ connect_to_endpoints( \ resolver.resolve(ASIO_VARIADIC_BYVAL_ARGS(n), ec_)); \ return !ec_ ? this : 0; \ } \ /**/ #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A separate base class is used to ensure that the io_context member is // initialised prior to the basic_socket_streambuf's basic_socket base class. class socket_streambuf_io_context { protected: socket_streambuf_io_context(io_context* ctx) : default_io_context_(ctx) { } shared_ptr default_io_context_; }; // A separate base class is used to ensure that the dynamically allocated // buffers are constructed prior to the basic_socket_streambuf's basic_socket // base class. This makes moving the socket is the last potentially throwing // step in the streambuf's move constructor, giving the constructor a strong // exception safety guarantee. class socket_streambuf_buffers { protected: socket_streambuf_buffers() : get_buffer_(buffer_size), put_buffer_(buffer_size) { } enum { buffer_size = 512 }; std::vector get_buffer_; std::vector put_buffer_; }; } // namespace detail #if !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL) #define ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL // Forward declaration with defaulted arguments. 
template > #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typename Clock = chrono::steady_clock, typename WaitTraits = wait_traits > #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) class basic_socket_streambuf; #endif // !defined(ASIO_BASIC_SOCKET_STREAMBUF_FWD_DECL) /// Iostream streambuf for a socket. #if defined(GENERATING_DOCUMENTATION) template > #else // defined(GENERATING_DOCUMENTATION) template #endif // defined(GENERATING_DOCUMENTATION) class basic_socket_streambuf : public std::streambuf, private detail::socket_streambuf_io_context, private detail::socket_streambuf_buffers, #if defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) private basic_socket #else // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) public basic_socket #endif // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) { private: // These typedefs are intended keep this class's implementation independent // of whether it's using Boost.DateClock, Boost.Chrono or std::chrono. #if defined(ASIO_HAS_BOOST_DATE_TIME) \ && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typedef WaitTraits traits_helper; #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typedef detail::chrono_time_traits traits_helper; #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) public: /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// The clock type. typedef Clock clock_type; #if defined(GENERATING_DOCUMENTATION) /// (Deprecated: Use time_point.) The time type. typedef typename WaitTraits::time_type time_type; /// The time type. typedef typename WaitTraits::time_point time_point; /// (Deprecated: Use duration.) The duration type. 
typedef typename WaitTraits::duration_type duration_type; /// The duration type. typedef typename WaitTraits::duration duration; #else # if !defined(ASIO_NO_DEPRECATED) typedef typename traits_helper::time_type time_type; typedef typename traits_helper::duration_type duration_type; # endif // !defined(ASIO_NO_DEPRECATED) typedef typename traits_helper::time_type time_point; typedef typename traits_helper::duration_type duration; #endif /// Construct a basic_socket_streambuf without establishing a connection. basic_socket_streambuf() : detail::socket_streambuf_io_context(new io_context), basic_socket(*default_io_context_), expiry_time_(max_expiry_time()) { init_buffers(); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Construct a basic_socket_streambuf from the supplied socket. explicit basic_socket_streambuf(basic_stream_socket s) : detail::socket_streambuf_io_context(0), basic_socket(std::move(s)), expiry_time_(max_expiry_time()) { init_buffers(); } /// Move-construct a basic_socket_streambuf from another. basic_socket_streambuf(basic_socket_streambuf&& other) : detail::socket_streambuf_io_context(other), basic_socket(std::move(other.socket())), ec_(other.ec_), expiry_time_(other.expiry_time_) { get_buffer_.swap(other.get_buffer_); put_buffer_.swap(other.put_buffer_); setg(other.eback(), other.gptr(), other.egptr()); setp(other.pptr(), other.epptr()); other.ec_ = asio::error_code(); other.expiry_time_ = max_expiry_time(); other.init_buffers(); } /// Move-assign a basic_socket_streambuf from another. 
basic_socket_streambuf& operator=(basic_socket_streambuf&& other) { this->close(); socket() = std::move(other.socket()); detail::socket_streambuf_io_context::operator=(other); ec_ = other.ec_; expiry_time_ = other.expiry_time_; get_buffer_.swap(other.get_buffer_); put_buffer_.swap(other.put_buffer_); setg(other.eback(), other.gptr(), other.egptr()); setp(other.pptr(), other.epptr()); other.ec_ = asio::error_code(); other.expiry_time_ = max_expiry_time(); other.put_buffer_.resize(buffer_size); other.init_buffers(); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor flushes buffered data. virtual ~basic_socket_streambuf() { if (pptr() != pbase()) overflow(traits_type::eof()); } /// Establish a connection. /** * This function establishes a connection to the specified endpoint. * * @return \c this if a connection was successfully established, a null * pointer otherwise. */ basic_socket_streambuf* connect(const endpoint_type& endpoint) { init_buffers(); ec_ = asio::error_code(); this->connect_to_endpoints(&endpoint, &endpoint + 1); return !ec_ ? this : 0; } #if defined(GENERATING_DOCUMENTATION) /// Establish a connection. /** * This function automatically establishes a connection based on the supplied * resolver query parameters. The arguments are used to construct a resolver * query object. * * @return \c this if a connection was successfully established, a null * pointer otherwise. */ template basic_socket_streambuf* connect(T1 t1, ..., TN tn); #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) template basic_socket_streambuf* connect(T... x) { init_buffers(); typedef typename Protocol::resolver resolver_type; resolver_type resolver(socket().get_executor()); connect_to_endpoints(resolver.resolve(x..., ec_)); return !ec_ ? this : 0; } #else ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CONNECT_DEF) #endif /// Close the connection. /** * @return \c this if a connection was successfully established, a null * pointer otherwise. 
*/ basic_socket_streambuf* close() { sync(); socket().close(ec_); if (!ec_) init_buffers(); return !ec_ ? this : 0; } /// Get a reference to the underlying socket. basic_socket& socket() { return *this; } /// Get the last error associated with the stream buffer. /** * @return An \c error_code corresponding to the last error from the stream * buffer. */ const asio::error_code& error() const { return ec_; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use error().) Get the last error associated with the stream /// buffer. /** * @return An \c error_code corresponding to the last error from the stream * buffer. */ const asio::error_code& puberror() const { return error(); } /// (Deprecated: Use expiry().) Get the stream buffer's expiry time as an /// absolute time. /** * @return An absolute time value representing the stream buffer's expiry * time. */ time_point expires_at() const { return expiry_time_; } #endif // !defined(ASIO_NO_DEPRECATED) /// Get the stream buffer's expiry time as an absolute time. /** * @return An absolute time value representing the stream buffer's expiry * time. */ time_point expiry() const { return expiry_time_; } /// Set the stream buffer's expiry time as an absolute time. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the stream. */ void expires_at(const time_point& expiry_time) { expiry_time_ = expiry_time; } /// Set the stream buffer's expiry time relative to now. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the timer. 
*/ void expires_after(const duration& expiry_time) { expiry_time_ = traits_helper::add(traits_helper::now(), expiry_time); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use expiry().) Get the stream buffer's expiry time relative /// to now. /** * @return A relative time value representing the stream buffer's expiry time. */ duration expires_from_now() const { return traits_helper::subtract(expires_at(), traits_helper::now()); } /// (Deprecated: Use expires_after().) Set the stream buffer's expiry time /// relative to now. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the timer. */ void expires_from_now(const duration& expiry_time) { expiry_time_ = traits_helper::add(traits_helper::now(), expiry_time); } #endif // !defined(ASIO_NO_DEPRECATED) protected: int_type underflow() { #if defined(ASIO_WINDOWS_RUNTIME) ec_ = asio::error::operation_not_supported; return traits_type::eof(); #else // defined(ASIO_WINDOWS_RUNTIME) if (gptr() != egptr()) return traits_type::eof(); for (;;) { // Check if we are past the expiry time. if (traits_helper::less_than(expiry_time_, traits_helper::now())) { ec_ = asio::error::timed_out; return traits_type::eof(); } // Try to complete the operation without blocking. if (!socket().native_non_blocking()) socket().native_non_blocking(true, ec_); detail::buffer_sequence_adapter bufs(asio::buffer(get_buffer_) + putback_max); detail::signed_size_type bytes = detail::socket_ops::recv( socket().native_handle(), bufs.buffers(), bufs.count(), 0, ec_); // Check if operation succeeded. if (bytes > 0) { setg(&get_buffer_[0], &get_buffer_[0] + putback_max, &get_buffer_[0] + putback_max + bytes); return traits_type::to_int_type(*gptr()); } // Check for EOF. 
if (bytes == 0) { ec_ = asio::error::eof; return traits_type::eof(); } // Operation failed. if (ec_ != asio::error::would_block && ec_ != asio::error::try_again) return traits_type::eof(); // Wait for socket to become ready. if (detail::socket_ops::poll_read( socket().native_handle(), 0, timeout(), ec_) < 0) return traits_type::eof(); } #endif // defined(ASIO_WINDOWS_RUNTIME) } int_type overflow(int_type c) { #if defined(ASIO_WINDOWS_RUNTIME) ec_ = asio::error::operation_not_supported; return traits_type::eof(); #else // defined(ASIO_WINDOWS_RUNTIME) char_type ch = traits_type::to_char_type(c); // Determine what needs to be sent. const_buffer output_buffer; if (put_buffer_.empty()) { if (traits_type::eq_int_type(c, traits_type::eof())) return traits_type::not_eof(c); // Nothing to do. output_buffer = asio::buffer(&ch, sizeof(char_type)); } else { output_buffer = asio::buffer(pbase(), (pptr() - pbase()) * sizeof(char_type)); } while (output_buffer.size() > 0) { // Check if we are past the expiry time. if (traits_helper::less_than(expiry_time_, traits_helper::now())) { ec_ = asio::error::timed_out; return traits_type::eof(); } // Try to complete the operation without blocking. if (!socket().native_non_blocking()) socket().native_non_blocking(true, ec_); detail::buffer_sequence_adapter< const_buffer, const_buffer> bufs(output_buffer); detail::signed_size_type bytes = detail::socket_ops::send( socket().native_handle(), bufs.buffers(), bufs.count(), 0, ec_); // Check if operation succeeded. if (bytes > 0) { output_buffer += static_cast(bytes); continue; } // Operation failed. if (ec_ != asio::error::would_block && ec_ != asio::error::try_again) return traits_type::eof(); // Wait for socket to become ready. if (detail::socket_ops::poll_write( socket().native_handle(), 0, timeout(), ec_) < 0) return traits_type::eof(); } if (!put_buffer_.empty()) { setp(&put_buffer_[0], &put_buffer_[0] + put_buffer_.size()); // If the new character is eof then our work here is done. 
if (traits_type::eq_int_type(c, traits_type::eof())) return traits_type::not_eof(c); // Add the new character to the output buffer. *pptr() = ch; pbump(1); } return c; #endif // defined(ASIO_WINDOWS_RUNTIME) } int sync() { return overflow(traits_type::eof()); } std::streambuf* setbuf(char_type* s, std::streamsize n) { if (pptr() == pbase() && s == 0 && n == 0) { put_buffer_.clear(); setp(0, 0); sync(); return this; } return 0; } private: // Disallow copying and assignment. basic_socket_streambuf(const basic_socket_streambuf&) ASIO_DELETED; basic_socket_streambuf& operator=( const basic_socket_streambuf&) ASIO_DELETED; void init_buffers() { setg(&get_buffer_[0], &get_buffer_[0] + putback_max, &get_buffer_[0] + putback_max); if (put_buffer_.empty()) setp(0, 0); else setp(&put_buffer_[0], &put_buffer_[0] + put_buffer_.size()); } int timeout() const { int64_t msec = traits_helper::to_posix_duration( traits_helper::subtract(expiry_time_, traits_helper::now())).total_milliseconds(); if (msec > (std::numeric_limits::max)()) msec = (std::numeric_limits::max)(); else if (msec < 0) msec = 0; return static_cast(msec); } template void connect_to_endpoints(const EndpointSequence& endpoints) { this->connect_to_endpoints(endpoints.begin(), endpoints.end()); } template void connect_to_endpoints(EndpointIterator begin, EndpointIterator end) { #if defined(ASIO_WINDOWS_RUNTIME) ec_ = asio::error::operation_not_supported; #else // defined(ASIO_WINDOWS_RUNTIME) if (ec_) return; ec_ = asio::error::not_found; for (EndpointIterator i = begin; i != end; ++i) { // Check if we are past the expiry time. if (traits_helper::less_than(expiry_time_, traits_helper::now())) { ec_ = asio::error::timed_out; return; } // Close and reopen the socket. typename Protocol::endpoint ep(*i); socket().close(ec_); socket().open(ep.protocol(), ec_); if (ec_) continue; // Try to complete the operation without blocking. 
if (!socket().native_non_blocking()) socket().native_non_blocking(true, ec_); detail::socket_ops::connect(socket().native_handle(), ep.data(), ep.size(), ec_); // Check if operation succeeded. if (!ec_) return; // Operation failed. if (ec_ != asio::error::in_progress && ec_ != asio::error::would_block) continue; // Wait for socket to become ready. if (detail::socket_ops::poll_connect( socket().native_handle(), timeout(), ec_) < 0) continue; // Get the error code from the connect operation. int connect_error = 0; size_t connect_error_len = sizeof(connect_error); if (detail::socket_ops::getsockopt(socket().native_handle(), 0, SOL_SOCKET, SO_ERROR, &connect_error, &connect_error_len, ec_) == detail::socket_error_retval) return; // Check the result of the connect operation. ec_ = asio::error_code(connect_error, asio::error::get_system_category()); if (!ec_) return; } #endif // defined(ASIO_WINDOWS_RUNTIME) } // Helper function to get the maximum expiry time. static time_point max_expiry_time() { #if defined(ASIO_HAS_BOOST_DATE_TIME) \ && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) return boost::posix_time::pos_infin; #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) return (time_point::max)(); #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) } enum { putback_max = 8 }; asio::error_code ec_; time_point expiry_time_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # undef ASIO_PRIVATE_CONNECT_DEF #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_BASIC_SOCKET_STREAMBUF_HPP galera-4-26.4.25/asio/asio/basic_socket.hpp000644 000164 177776 00000170767 15107057155 021566 0ustar00jenkinsnogroup000000 000000 // // basic_socket.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_HPP #define ASIO_BASIC_SOCKET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/post.hpp" #include "asio/socket_base.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_socket_service.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_socket_service.hpp" #else # include "asio/detail/reactive_socket_service.hpp" #endif #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_SOCKET_FWD_DECL) #define ASIO_BASIC_SOCKET_FWD_DECL // Forward declaration with defaulted arguments. template class basic_socket; #endif // !defined(ASIO_BASIC_SOCKET_FWD_DECL) /// Provides socket functionality. /** * The basic_socket class template provides functionality that is common to both * stream-oriented and datagram-oriented sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_socket : public socket_base { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Rebinds the socket type to another executor. template struct rebind_executor { /// The socket type when rebound to the specified executor. typedef basic_socket other; }; /// The native representation of a socket. 
#if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #elif defined(ASIO_WINDOWS_RUNTIME) typedef typename detail::null_socket_service< Protocol>::native_handle_type native_handle_type; #elif defined(ASIO_HAS_IOCP) typedef typename detail::win_iocp_socket_service< Protocol>::native_handle_type native_handle_type; #else typedef typename detail::reactive_socket_service< Protocol>::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; #if !defined(ASIO_NO_EXTENSIONS) /// A basic_socket is always the lowest layer. typedef basic_socket lowest_layer_type; #endif // !defined(ASIO_NO_EXTENSIONS) /// Construct a basic_socket without opening it. /** * This constructor creates a socket without opening it. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. */ explicit basic_socket(const executor_type& ex) : impl_(ex) { } /// Construct a basic_socket without opening it. /** * This constructor creates a socket without opening it. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. */ template explicit basic_socket(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Construct and open a basic_socket. /** * This constructor creates and opens a socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. 
*/ basic_socket(const executor_type& ex, const protocol_type& protocol) : impl_(ex) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct and open a basic_socket. /** * This constructor creates and opens a socket. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ template basic_socket(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct a basic_socket, opening it and binding it to the given local /// endpoint. /** * This constructor creates a socket and automatically opens it bound to the * specified endpoint on the local machine. The protocol used is the protocol * associated with the given endpoint. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. */ basic_socket(const executor_type& ex, const endpoint_type& endpoint) : impl_(ex) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Construct a basic_socket, opening it and binding it to the given local /// endpoint. 
/** * This constructor creates a socket and automatically opens it bound to the * specified endpoint on the local machine. The protocol used is the protocol * associated with the given endpoint. * * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. */ template basic_socket(ExecutionContext& context, const endpoint_type& endpoint, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Construct a basic_socket on an existing native socket. /** * This constructor creates a socket object to hold an existing native socket. * * @param ex The I/O executor that the socket will use, by default, to * dispatch handlers for any asynchronous operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ basic_socket(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_socket) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } /// Construct a basic_socket on an existing native socket. /** * This constructor creates a socket object to hold an existing native socket. 
* * @param context An execution context which provides the I/O executor that * the socket will use, by default, to dispatch handlers for any asynchronous * operations performed on the socket. * * @param protocol An object specifying protocol parameters to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ template basic_socket(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_socket, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_socket from another. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(const executor_type&) constructor. */ basic_socket(basic_socket&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_socket from another. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(const executor_type&) constructor. */ basic_socket& operator=(basic_socket&& other) { impl_ = std::move(other.impl_); return *this; } // All sockets have access to each other's implementations. template friend class basic_socket; /// Move-construct a basic_socket from a socket of another protocol type. /** * This constructor moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. 
* * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(const executor_type&) constructor. */ template basic_socket(basic_socket&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_socket from a socket of another protocol type. /** * This assignment operator moves a socket from one object to another. * * @param other The other basic_socket object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket(const executor_type&) constructor. */ template typename enable_if< is_convertible::value && is_convertible::value, basic_socket& >::type operator=(basic_socket && other) { basic_socket tmp(std::move(other)); impl_ = std::move(tmp.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } #if !defined(ASIO_NO_EXTENSIONS) /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_socket cannot contain any further layers, it simply * returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. 
*/ const lowest_layer_type& lowest_layer() const { return *this; } #endif // !defined(ASIO_NO_EXTENSIONS) /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * socket.open(asio::ip::tcp::v4()); * @endcode */ void open(const protocol_type& protocol = protocol_type()) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Open the socket using the specified protocol. /** * This function opens the socket so that it will use the specified protocol. * * @param protocol An object specifying which protocol is to be used. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * asio::error_code ec; * socket.open(asio::ip::tcp::v4(), ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID open(const protocol_type& protocol, asio::error_code& ec) { impl_.get_service().open(impl_.get_implementation(), protocol, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. * * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @throws asio::system_error Thrown on failure. */ void assign(const protocol_type& protocol, const native_handle_type& native_socket) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_socket, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native socket to the socket. /* * This function opens the socket to hold an existing native socket. 
* * @param protocol An object specifying which protocol is to be used. * * @param native_socket A native socket. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const protocol_type& protocol, const native_handle_type& native_socket, asio::error_code& ec) { impl_.get_service().assign(impl_.get_implementation(), protocol, native_socket, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the socket is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. */ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the socket. /** * This function is used to close the socket. Any asynchronous send, receive * or connect operations will be cancelled immediately, and will complete * with the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. Note that, even if * the function indicates an error, the underlying descriptor is closed. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::error_code ec; * socket.close(ec); * if (ec) * { * // An error occurred. * } * @endcode * * @note For portable behaviour with respect to graceful closure of a * connected socket, call shutdown() before closing the socket. 
*/ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Release ownership of the underlying native socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. Ownership * of the native socket is then transferred to the caller. * * @throws asio::system_error Thrown on failure. * * @note This function is unsupported on Windows versions prior to Windows * 8.1, and will fail with asio::error::operation_not_supported on * these platforms. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603) __declspec(deprecated("This function always fails with " "operation_not_supported when used on Windows versions " "prior to Windows 8.1.")) #endif native_handle_type release() { asio::error_code ec; native_handle_type s = impl_.get_service().release( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "release"); return s; } /// Release ownership of the underlying native socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. Ownership * of the native socket is then transferred to the caller. * * @param ec Set to indicate what error occurred, if any. * * @note This function is unsupported on Windows versions prior to Windows * 8.1, and will fail with asio::error::operation_not_supported on * these platforms. 
*/ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603) __declspec(deprecated("This function always fails with " "operation_not_supported when used on Windows versions " "prior to Windows 8.1.")) #endif native_handle_type release(asio::error_code& ec) { return impl_.get_service().release(impl_.get_implementation(), ec); } /// Get the native socket representation. /** * This function may be used to obtain the underlying representation of the * socket. This is intended to allow access to native socket functionality * that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. 
* * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the socket. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. * * @note Calls to cancel() will always fail with * asio::error::operation_not_supported when run on Windows XP, Windows * Server 2003, and earlier versions of Windows, unless * ASIO_ENABLE_CANCELIO is defined. However, the CancelIo function has * two issues that should be considered before enabling its use: * * @li It will only cancel asynchronous operations that were initiated in the * current thread. * * @li It can appear to complete without error, but the request to cancel the * unfinished operations may be silently ignored by the operating system. * Whether it works or not seems to depend on the drivers that are installed. * * For portable cancellation, consider using one of the following * alternatives: * * @li Disable asio's I/O completion port backend by defining * ASIO_DISABLE_IOCP. * * @li Use the close() function to simultaneously cancel the outstanding * operations and close the socket. 
* * When running on Windows Vista, Windows Server 2008, and later, the * CancelIoEx function is always used. This function does not have the * problems described above. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600) \ && !defined(ASIO_ENABLE_CANCELIO) __declspec(deprecated("By default, this function always fails with " "operation_not_supported when used on Windows XP, Windows Server 2003, " "or earlier. Consult documentation for details.")) #endif ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @return A bool indicating whether the socket is at the out-of-band data * mark. * * @throws asio::system_error Thrown on failure. */ bool at_mark() const { asio::error_code ec; bool b = impl_.get_service().at_mark(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "at_mark"); return b; } /// Determine whether the socket is at the out-of-band data mark. /** * This function is used to check whether the socket input is currently * positioned at the out-of-band data mark. * * @param ec Set to indicate what error occurred, if any. * * @return A bool indicating whether the socket is at the out-of-band data * mark. */ bool at_mark(asio::error_code& ec) const { return impl_.get_service().at_mark(impl_.get_implementation(), ec); } /// Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. * * @throws asio::system_error Thrown on failure. 
*/ std::size_t available() const { asio::error_code ec; std::size_t s = impl_.get_service().available( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "available"); return s; } /// Determine the number of bytes available for reading. /** * This function is used to determine the number of bytes that may be read * without blocking. * * @param ec Set to indicate what error occurred, if any. * * @return The number of bytes that may be read without blocking, or 0 if an * error occurs. */ std::size_t available(asio::error_code& ec) const { return impl_.get_service().available(impl_.get_implementation(), ec); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * socket.open(asio::ip::tcp::v4()); * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345)); * @endcode */ void bind(const endpoint_type& endpoint) { asio::error_code ec; impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Bind the socket to the given local endpoint. /** * This function binds the socket to the specified endpoint on the local * machine. * * @param endpoint An endpoint on the local machine to which the socket will * be bound. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * socket.open(asio::ip::tcp::v4()); * asio::error_code ec; * socket.bind(asio::ip::tcp::endpoint( * asio::ip::tcp::v4(), 12345), ec); * if (ec) * { * // An error occurred. 
* } * @endcode */ ASIO_SYNC_OP_VOID bind(const endpoint_type& endpoint, asio::error_code& ec) { impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.connect(endpoint); * @endcode */ void connect(const endpoint_type& peer_endpoint) { asio::error_code ec; if (!is_open()) { impl_.get_service().open(impl_.get_implementation(), peer_endpoint.protocol(), ec); asio::detail::throw_error(ec, "connect"); } impl_.get_service().connect(impl_.get_implementation(), peer_endpoint, ec); asio::detail::throw_error(ec, "connect"); } /// Connect the socket to the specified endpoint. /** * This function is used to connect a socket to the specified remote endpoint. * The function call will block until the connection is successfully made or * an error occurs. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. * * @param ec Set to indicate what error occurred, if any. 
* * @par Example * @code * asio::ip::tcp::socket socket(my_context); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * asio::error_code ec; * socket.connect(endpoint, ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID connect(const endpoint_type& peer_endpoint, asio::error_code& ec) { if (!is_open()) { impl_.get_service().open(impl_.get_implementation(), peer_endpoint.protocol(), ec); if (ec) { ASIO_SYNC_OP_VOID_RETURN(ec); } } impl_.get_service().connect(impl_.get_implementation(), peer_endpoint, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Start an asynchronous connect. /** * This function is used to asynchronously connect a socket to the specified * remote endpoint. The function call always returns immediately. * * The socket is automatically opened if it is not already open. If the * connect fails, and the socket was automatically opened, the socket is * not returned to the closed state. * * @param peer_endpoint The remote endpoint to which the socket will be * connected. Copies will be made of the endpoint object as required. * * @param handler The handler to be called when the connection operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void connect_handler(const asio::error_code& error) * { * if (!error) * { * // Connect succeeded. * } * } * * ... 
* * asio::ip::tcp::socket socket(my_context); * asio::ip::tcp::endpoint endpoint( * asio::ip::address::from_string("1.2.3.4"), 12345); * socket.async_connect(endpoint, connect_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(ConnectHandler, void (asio::error_code)) async_connect(const endpoint_type& peer_endpoint, ASIO_MOVE_ARG(ConnectHandler) handler) { asio::error_code open_ec; if (!is_open()) { const protocol_type protocol = peer_endpoint.protocol(); impl_.get_service().open(impl_.get_implementation(), protocol, open_ec); } return async_initiate( initiate_async_connect(), handler, this, peer_endpoint, open_ec); } /// Set an option on the socket. /** * This function is used to set an option on the socket. * * @param option The new option value to be set on the socket. * * @throws asio::system_error Thrown on failure. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode */ template void set_option(const SettableSocketOption& option) { asio::error_code ec; impl_.get_service().set_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } /// Set an option on the socket. /** * This function is used to set an option on the socket. 
* * @param option The new option value to be set on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa SettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Setting the IPPROTO_TCP/TCP_NODELAY option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::no_delay option(true); * asio::error_code ec; * socket.set_option(option, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template ASIO_SYNC_OP_VOID set_option(const SettableSocketOption& option, asio::error_code& ec) { impl_.get_service().set_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @throws asio::system_error Thrown on failure. 
* * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::socket::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode */ template void get_option(GettableSocketOption& option) const { asio::error_code ec; impl_.get_service().get_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } /// Get an option from the socket. /** * This function is used to get the current value of an option on the socket. * * @param option The option value to be obtained from the socket. * * @param ec Set to indicate what error occurred, if any. 
* * @sa GettableSocketOption @n * asio::socket_base::broadcast @n * asio::socket_base::do_not_route @n * asio::socket_base::keep_alive @n * asio::socket_base::linger @n * asio::socket_base::receive_buffer_size @n * asio::socket_base::receive_low_watermark @n * asio::socket_base::reuse_address @n * asio::socket_base::send_buffer_size @n * asio::socket_base::send_low_watermark @n * asio::ip::multicast::join_group @n * asio::ip::multicast::leave_group @n * asio::ip::multicast::enable_loopback @n * asio::ip::multicast::outbound_interface @n * asio::ip::multicast::hops @n * asio::ip::tcp::no_delay * * @par Example * Getting the value of the SOL_SOCKET/SO_KEEPALIVE option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::socket::keep_alive option; * asio::error_code ec; * socket.get_option(option, ec); * if (ec) * { * // An error occurred. * } * bool is_set = option.value(); * @endcode */ template ASIO_SYNC_OP_VOID get_option(GettableSocketOption& option, asio::error_code& ec) const { impl_.get_service().get_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform an IO control command on the socket. /** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::socket::bytes_readable command; * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode */ template void io_control(IoControlCommand& command) { asio::error_code ec; impl_.get_service().io_control(impl_.get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } /// Perform an IO control command on the socket. 
/** * This function is used to execute an IO control command on the socket. * * @param command The IO control command to be performed on the socket. * * @param ec Set to indicate what error occurred, if any. * * @sa IoControlCommand @n * asio::socket_base::bytes_readable @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::socket::bytes_readable command; * asio::error_code ec; * socket.io_control(command, ec); * if (ec) * { * // An error occurred. * } * std::size_t bytes_readable = command.get(); * @endcode */ template ASIO_SYNC_OP_VOID io_control(IoControlCommand& command, asio::error_code& ec) { impl_.get_service().io_control(impl_.get_implementation(), command, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the socket. /** * @returns @c true if the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return impl_.get_service().non_blocking(impl_.get_implementation()); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. 
*/ void non_blocking(bool mode) { asio::error_code ec; impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } /// Sets the non-blocking mode of the socket. /** * @param mode If @c true, the socket's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ ASIO_SYNC_OP_VOID non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the native socket implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native socket. This mode has no effect on the behaviour of the socket * object's synchronous operations. * * @returns @c true if the underlying socket is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the socket object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native socket. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. 
The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_wait(tcp::socket::wait_write, *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_wait(tcp::socket::wait_write, op); * } @endcode */ bool native_non_blocking() const { return impl_.get_service().native_non_blocking(impl_.get_implementation()); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. 
It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. * if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_wait(tcp::socket::wait_write, *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. 
* // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_wait(tcp::socket::wait_write, op); * } @endcode */ void native_non_blocking(bool mode) { asio::error_code ec; impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } /// Sets the non-blocking mode of the native socket implementation. /** * This function is used to modify the non-blocking mode of the underlying * native socket. It has no effect on the behaviour of the socket object's * synchronous operations. * * @param mode If @c true, the underlying socket is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. * * @par Example * This function is intended to allow the encapsulation of arbitrary * non-blocking system calls as asynchronous operations, in a way that is * transparent to the user of the socket object. The following example * illustrates how Linux's @c sendfile system call might be encapsulated: * @code template * struct sendfile_op * { * tcp::socket& sock_; * int fd_; * Handler handler_; * off_t offset_; * std::size_t total_bytes_transferred_; * * // Function call operator meeting WriteHandler requirements. * // Used as the handler for the async_write_some operation. * void operator()(asio::error_code ec, std::size_t) * { * // Put the underlying socket into non-blocking mode. 
* if (!ec) * if (!sock_.native_non_blocking()) * sock_.native_non_blocking(true, ec); * * if (!ec) * { * for (;;) * { * // Try the system call. * errno = 0; * int n = ::sendfile(sock_.native_handle(), fd_, &offset_, 65536); * ec = asio::error_code(n < 0 ? errno : 0, * asio::error::get_system_category()); * total_bytes_transferred_ += ec ? 0 : n; * * // Retry operation immediately if interrupted by signal. * if (ec == asio::error::interrupted) * continue; * * // Check if we need to run the operation again. * if (ec == asio::error::would_block * || ec == asio::error::try_again) * { * // We have to wait for the socket to become ready again. * sock_.async_wait(tcp::socket::wait_write, *this); * return; * } * * if (ec || n == 0) * { * // An error occurred, or we have reached the end of the file. * // Either way we must exit the loop so we can call the handler. * break; * } * * // Loop around to try calling sendfile again. * } * } * * // Pass result back to user's handler. * handler_(ec, total_bytes_transferred_); * } * }; * * template * void async_sendfile(tcp::socket& sock, int fd, Handler h) * { * sendfile_op op = { sock, fd, h, 0, 0 }; * sock.async_wait(tcp::socket::wait_write, op); * } @endcode */ ASIO_SYNC_OP_VOID native_non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @returns An object that represents the local endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... 
* asio::ip::tcp::endpoint endpoint = socket.local_endpoint(); * @endcode */ endpoint_type local_endpoint() const { asio::error_code ec; endpoint_type ep = impl_.get_service().local_endpoint( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "local_endpoint"); return ep; } /// Get the local endpoint of the socket. /** * This function is used to obtain the locally bound endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the local endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.local_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type local_endpoint(asio::error_code& ec) const { return impl_.get_service().local_endpoint(impl_.get_implementation(), ec); } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @returns An object that represents the remote endpoint of the socket. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(); * @endcode */ endpoint_type remote_endpoint() const { asio::error_code ec; endpoint_type ep = impl_.get_service().remote_endpoint( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "remote_endpoint"); return ep; } /// Get the remote endpoint of the socket. /** * This function is used to obtain the remote endpoint of the socket. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the remote endpoint of the socket. * Returns a default-constructed endpoint object if an error occurred. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... 
* asio::error_code ec; * asio::ip::tcp::endpoint endpoint = socket.remote_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type remote_endpoint(asio::error_code& ec) const { return impl_.get_service().remote_endpoint(impl_.get_implementation(), ec); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @throws asio::system_error Thrown on failure. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(my_context); * ... * socket.shutdown(asio::ip::tcp::socket::shutdown_send); * @endcode */ void shutdown(shutdown_type what) { asio::error_code ec; impl_.get_service().shutdown(impl_.get_implementation(), what, ec); asio::detail::throw_error(ec, "shutdown"); } /// Disable sends or receives on the socket. /** * This function is used to disable send operations, receive operations, or * both. * * @param what Determines what types of operation will no longer be allowed. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Shutting down the send side of the socket: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::error_code ec; * socket.shutdown(asio::ip::tcp::socket::shutdown_send, ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID shutdown(shutdown_type what, asio::error_code& ec) { impl_.get_service().shutdown(impl_.get_implementation(), what, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Wait for the socket to become ready to read, ready to write, or to have /// pending error conditions. /** * This function is used to perform a blocking wait for a socket to enter * a ready to read, write or error condition state. * * @param w Specifies the desired socket state. * * @par Example * Waiting for a socket to become readable. 
* @code * asio::ip::tcp::socket socket(my_context); * ... * socket.wait(asio::ip::tcp::socket::wait_read); * @endcode */ void wait(wait_type w) { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), w, ec); asio::detail::throw_error(ec, "wait"); } /// Wait for the socket to become ready to read, ready to write, or to have /// pending error conditions. /** * This function is used to perform a blocking wait for a socket to enter * a ready to read, write or error condition state. * * @param w Specifies the desired socket state. * * @param ec Set to indicate what error occurred, if any. * * @par Example * Waiting for a socket to become readable. * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::error_code ec; * socket.wait(asio::ip::tcp::socket::wait_read, ec); * @endcode */ ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), w, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Asynchronously wait for the socket to become ready to read, ready to /// write, or to have pending error conditions. /** * This function is used to perform an asynchronous wait for a socket to enter * a ready to read, write or error condition state. * * @param w Specifies the desired socket state. * * @param handler The handler to be called when the wait operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void wait_handler(const asio::error_code& error) * { * if (!error) * { * // Wait succeeded. * } * } * * ... * * asio::ip::tcp::socket socket(my_context); * ... 
* socket.async_wait(asio::ip::tcp::socket::wait_read, wait_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(wait_type w, ASIO_MOVE_ARG(WaitHandler) handler) { return async_initiate( initiate_async_wait(), handler, this, w); } protected: /// Protected destructor to prevent deletion through this type. /** * This function destroys the socket, cancelling any outstanding asynchronous * operations associated with the socket as if by calling @c cancel. */ ~basic_socket() { } #if defined(ASIO_WINDOWS_RUNTIME) detail::io_object_impl< detail::null_socket_service, Executor> impl_; #elif defined(ASIO_HAS_IOCP) detail::io_object_impl< detail::win_iocp_socket_service, Executor> impl_; #else detail::io_object_impl< detail::reactive_socket_service, Executor> impl_; #endif private: // Disallow copying and assignment. basic_socket(const basic_socket&) ASIO_DELETED; basic_socket& operator=(const basic_socket&) ASIO_DELETED; struct initiate_async_connect { template void operator()(ASIO_MOVE_ARG(ConnectHandler) handler, basic_socket* self, const endpoint_type& peer_endpoint, const asio::error_code& open_ec) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ConnectHandler. ASIO_CONNECT_HANDLER_CHECK(ConnectHandler, handler) type_check; if (open_ec) { asio::post(self->impl_.get_executor(), asio::detail::bind_handler( ASIO_MOVE_CAST(ConnectHandler)(handler), open_ec)); } else { detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_connect( self->impl_.get_implementation(), peer_endpoint, handler2.value, self->impl_.get_implementation_executor()); } } }; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(WaitHandler) handler, basic_socket* self, wait_type w) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), w, handler2.value, self->impl_.get_implementation_executor()); } }; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SOCKET_HPP galera-4-26.4.25/asio/asio/read.hpp000644 000164 177776 00000142723 15107057155 020037 0ustar00jenkinsnogroup000000 000000 // // read.hpp // ~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_READ_HPP #define ASIO_READ_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/buffer.hpp" #include "asio/error.hpp" #if !defined(ASIO_NO_EXTENSIONS) # include "asio/basic_streambuf_fwd.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup read asio::read * * @brief The @c read function is a composed operation that reads a certain * amount of data from a stream before returning. */ /*@{*/ /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. 
* * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read(s, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read(s, asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, asio::error_code& ec, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. 
* * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::read(s, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. 
* * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read(SyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. 
* * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. 
*/ template std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. 
*/ template std::size_t read(SyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b The basic_streambuf object into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::read( * s, b, * asio::transfer_all()); @endcode */ template std::size_t read(SyncReadStream& s, basic_streambuf& b); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b The basic_streambuf object into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. 
* * @note This overload is equivalent to calling: * @code asio::read( * s, b, * asio::transfer_all(), ec); @endcode */ template std::size_t read(SyncReadStream& s, basic_streambuf& b, asio::error_code& ec); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t read(SyncReadStream& s, basic_streambuf& b, CompletionCondition completion_condition); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). 
* * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param b The basic_streambuf object into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read(SyncReadStream& s, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. 
* * @param buffers The dynamic buffer sequence into which the data will be read. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all()); @endcode */ template std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::read( * s, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. 
The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Attempt to read a certain amount of data from a stream before returning. /** * This function is used to read a certain number of bytes of data from a * stream. The call will block until one of the following conditions is true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * read_some function. * * @param s The stream from which the data is to be read. The type must support * the SyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. 
* std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's read_some function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t read(SyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, asio::error_code& ec, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /*@}*/ /** * @defgroup async_read asio::async_read * * @brief The @c async_read function is a composed asynchronous operation that * reads a certain amount of data from a stream before completion. */ /*@{*/ /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. 
Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * asio::async_read(s, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::async_read( * s, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. 
The * asynchronous operation will continue until one of the following conditions is * true: * * @li The supplied buffers are full. That is, the bytes transferred is equal to * the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers One or more buffers into which the data will be read. The sum * of the buffer sizes indicates the maximum number of bytes to read from the * stream. Although the buffers object may be copied as necessary, ownership of * the underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's async_read_some function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code asio::async_read(s, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, const MutableBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_mutable_buffer_sequence::value >::type* = 0); #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. 
* Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload is equivalent to calling: * @code asio::async_read( * s, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's async_read_some function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, ASIO_MOVE_ARG(DynamicBuffer_v1) buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v1::type>::value && !is_dynamic_buffer_v2::type>::value >::type* = 0); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. 
If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload is equivalent to calling: * @code asio::async_read( * s, b, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, basic_streambuf& b, ASIO_MOVE_ARG(ReadHandler) handler); /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The supplied buffer is full (that is, it has reached maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param b A basic_streambuf object into which the data will be read. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. 
The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's async_read_some function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Start an asynchronous operation to read a certain amount of data from a /// stream. /** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li An error occurred. 
* * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note This overload is equivalent to calling: * @code asio::async_read( * s, buffers, * asio::transfer_all(), * handler); @endcode */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, DynamicBuffer_v2 buffers, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /// Start an asynchronous operation to read a certain amount of data from a /// stream. 
/** * This function is used to asynchronously read a certain number of bytes of * data from a stream. The function call always returns immediately. The * asynchronous operation will continue until one of the following conditions is * true: * * @li The specified dynamic buffer sequence is full (that is, it has reached * maximum size). * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the stream's * async_read_some function, and is known as a composed operation. The * program must ensure that the stream performs no other read operations (such * as async_read, the stream's async_read_some function, or any other composed * operations that perform reads) until this operation completes. * * @param s The stream from which the data is to be read. The type must support * the AsyncReadStream concept. * * @param buffers The dynamic buffer sequence into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the read operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_read_some operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the read operation is complete. A non-zero * return value indicates the maximum number of bytes to be read on the next * call to the stream's async_read_some function. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. 
The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * * std::size_t bytes_transferred // Number of bytes copied into the * // buffers. If an error occurred, * // this will be the number of * // bytes successfully transferred * // prior to the error. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read(AsyncReadStream& s, DynamicBuffer_v2 buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(ReadHandler) handler, typename enable_if< is_dynamic_buffer_v2::value >::type* = 0); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/read.hpp" #endif // ASIO_READ_HPP galera-4-26.4.25/asio/asio/async_result.hpp000644 000164 177776 00000027517 15107057155 021642 0ustar00jenkinsnogroup000000 000000 // // async_result.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ASYNC_RESULT_HPP #define ASIO_ASYNC_RESULT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// An interface for customising the behaviour of an initiating function. 
/** * The async_result traits class is used for determining: * * @li the concrete completion handler type to be called at the end of the * asynchronous operation; * * @li the initiating function return type; and * * @li how the return value of the initiating function is obtained. * * The trait allows the handler and return types to be determined at the point * where the specific completion handler signature is known. * * This template may be specialised for user-defined completion token types. * The primary template assumes that the CompletionToken is the completion * handler. */ template class async_result { public: /// The concrete completion handler type for the specific signature. typedef CompletionToken completion_handler_type; /// The return type of the initiating function. typedef void return_type; /// Construct an async result from a given handler. /** * When using a specalised async_result, the constructor has an opportunity * to initialise some state associated with the completion handler, which is * then returned from the initiating function. */ explicit async_result(completion_handler_type& h) { (void)h; } /// Obtain the value to be returned from the initiating function. return_type get() { } #if defined(ASIO_HAS_VARIADIC_TEMPLATES) \ || defined(GENERATING_DOCUMENTATION) /// Initiate the asynchronous operation that will produce the result, and /// obtain the value to be returned from the initiating function. template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken) token, ASIO_MOVE_ARG(Args)... 
args) { ASIO_MOVE_CAST(Initiation)(initiation)( ASIO_MOVE_CAST(RawCompletionToken)(token), ASIO_MOVE_CAST(Args)(args)...); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) template static return_type initiate( ASIO_MOVE_ARG(Initiation) initiation, ASIO_MOVE_ARG(RawCompletionToken) token) { ASIO_MOVE_CAST(Initiation)(initiation)( ASIO_MOVE_CAST(RawCompletionToken)(token)); } #define ASIO_PRIVATE_INITIATE_DEF(n) \ template \ static return_type initiate( \ ASIO_MOVE_ARG(Initiation) initiation, \ ASIO_MOVE_ARG(RawCompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ ASIO_MOVE_CAST(Initiation)(initiation)( \ ASIO_MOVE_CAST(RawCompletionToken)(token), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF) #undef ASIO_PRIVATE_INITIATE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) private: async_result(const async_result&) ASIO_DELETED; async_result& operator=(const async_result&) ASIO_DELETED; }; /// Helper template to deduce the handler type from a CompletionToken, capture /// a local copy of the handler, and then create an async_result for the /// handler. template struct async_completion { /// The real handler type to be used for the asynchronous operation. typedef typename asio::async_result< typename decay::type, Signature>::completion_handler_type completion_handler_type; #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Constructor. /** * The constructor creates the concrete completion handler and makes the link * between the handler and the asynchronous result. 
*/ explicit async_completion(CompletionToken& token) : completion_handler(static_cast::value, completion_handler_type&, CompletionToken&&>::type>(token)), result(completion_handler) { } #else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) explicit async_completion(typename decay::type& token) : completion_handler(token), result(completion_handler) { } explicit async_completion(const typename decay::type& token) : completion_handler(token), result(completion_handler) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// A copy of, or reference to, a real handler object. #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) typename conditional< is_same::value, completion_handler_type&, completion_handler_type>::type completion_handler; #else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) completion_handler_type completion_handler; #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// The result of the asynchronous operation's initiating function. 
async_result::type, Signature> result; }; namespace detail { template struct async_result_helper : async_result::type, Signature> { }; struct async_result_memfns_base { void initiate(); }; template struct async_result_memfns_derived : T, async_result_memfns_base { }; template struct async_result_memfns_check { }; template char (&async_result_initiate_memfn_helper(...))[2]; template char async_result_initiate_memfn_helper( async_result_memfns_check< void (async_result_memfns_base::*)(), &async_result_memfns_derived::initiate>*); template struct async_result_has_initiate_memfn : integral_constant::type, Signature> >(0)) != 1> { }; } // namespace detail #if defined(GENERATING_DOCUMENTATION) # define ASIO_INITFN_RESULT_TYPE(ct, sig) \ void_or_deduced #elif defined(_MSC_VER) && (_MSC_VER < 1500) # define ASIO_INITFN_RESULT_TYPE(ct, sig) \ typename ::asio::detail::async_result_helper< \ ct, sig>::return_type #define ASIO_HANDLER_TYPE(ct, sig) \ typename ::asio::detail::async_result_helper< \ ct, sig>::completion_handler_type #else # define ASIO_INITFN_RESULT_TYPE(ct, sig) \ typename ::asio::async_result< \ typename ::asio::decay::type, sig>::return_type #define ASIO_HANDLER_TYPE(ct, sig) \ typename ::asio::async_result< \ typename ::asio::decay::type, sig>::completion_handler_type #endif #if defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) async_initiate(ASIO_MOVE_ARG(Initiation) initiation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken), ASIO_MOVE_ARG(Args)... args); #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) template inline typename enable_if< detail::async_result_has_initiate_memfn::value, ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type async_initiate(ASIO_MOVE_ARG(Initiation) initiation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, ASIO_MOVE_ARG(Args)... 
args) { return async_result::type, Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation), ASIO_MOVE_CAST(CompletionToken)(token), ASIO_MOVE_CAST(Args)(args)...); } template inline typename enable_if< !detail::async_result_has_initiate_memfn::value, ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type async_initiate(ASIO_MOVE_ARG(Initiation) initiation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, ASIO_MOVE_ARG(Args)... args) { async_completion completion(token); ASIO_MOVE_CAST(Initiation)(initiation)( ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken, Signature))(completion.completion_handler), ASIO_MOVE_CAST(Args)(args)...); return completion.result.get(); } #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template inline typename enable_if< detail::async_result_has_initiate_memfn::value, ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type async_initiate(ASIO_MOVE_ARG(Initiation) initiation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token) { return async_result::type, Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation), ASIO_MOVE_CAST(CompletionToken)(token)); } template inline typename enable_if< !detail::async_result_has_initiate_memfn::value, ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type async_initiate(ASIO_MOVE_ARG(Initiation) initiation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token) { async_completion completion(token); ASIO_MOVE_CAST(Initiation)(initiation)( ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken, Signature))(completion.completion_handler)); return completion.result.get(); } #define ASIO_PRIVATE_INITIATE_DEF(n) \ template \ inline typename enable_if< \ detail::async_result_has_initiate_memfn< \ CompletionToken, Signature>::value, \ ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type \ async_initiate(ASIO_MOVE_ARG(Initiation) initiation, \ ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return async_result::type, \ 
Signature>::initiate(ASIO_MOVE_CAST(Initiation)(initiation), \ ASIO_MOVE_CAST(CompletionToken)(token), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template \ inline typename enable_if< \ !detail::async_result_has_initiate_memfn< \ CompletionToken, Signature>::value, \ ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature)>::type \ async_initiate(ASIO_MOVE_ARG(Initiation) initiation, \ ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ async_completion completion(token); \ \ ASIO_MOVE_CAST(Initiation)(initiation)( \ ASIO_MOVE_CAST(ASIO_HANDLER_TYPE(CompletionToken, \ Signature))(completion.completion_handler), \ ASIO_VARIADIC_MOVE_ARGS(n)); \ \ return completion.result.get(); \ } \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_INITIATE_DEF) #undef ASIO_PRIVATE_INITIATE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_ASYNC_RESULT_HPP galera-4-26.4.25/asio/asio/execution_context.hpp000644 000164 177776 00000033362 15107057155 022671 0ustar00jenkinsnogroup000000 000000 // // execution_context.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_EXECUTION_CONTEXT_HPP #define ASIO_EXECUTION_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/noncopyable.hpp" #include "asio/detail/variadic_templates.hpp" #include "asio/detail/push_options.hpp" namespace asio { class execution_context; class io_context; #if !defined(GENERATING_DOCUMENTATION) template Service& use_service(execution_context&); template Service& use_service(io_context&); template void add_service(execution_context&, Service*); template bool has_service(execution_context&); #endif // !defined(GENERATING_DOCUMENTATION) namespace detail { class service_registry; } /// A context for function object execution. /** * An execution context represents a place where function objects will be * executed. An @c io_context is an example of an execution context. * * @par The execution_context class and services * * Class execution_context implements an extensible, type-safe, polymorphic set * of services, indexed by service type. * * Services exist to manage the resources that are shared across an execution * context. For example, timers may be implemented in terms of a single timer * queue, and this queue would be stored in a service. * * Access to the services of an execution_context is via three function * templates, use_service(), add_service() and has_service(). * * In a call to @c use_service(), the type argument chooses a service, * making available all members of the named type. If @c Service is not present * in an execution_context, an object of type @c Service is created and added * to the execution_context. A C++ program can check if an execution_context * implements a particular service with the function template @c * has_service(). 
* * Service objects may be explicitly added to an execution_context using the * function template @c add_service(). If the @c Service is already * present, the service_already_exists exception is thrown. If the owner of the * service is not the same object as the execution_context parameter, the * invalid_service_owner exception is thrown. * * Once a service reference is obtained from an execution_context object by * calling use_service(), that reference remains usable as long as the owning * execution_context object exists. * * All service implementations have execution_context::service as a public base * class. Custom services may be implemented by deriving from this class and * then added to an execution_context using the facilities described above. * * @par The execution_context as a base class * * Class execution_context may be used only as a base class for concrete * execution context types. The @c io_context is an example of such a derived * type. * * On destruction, a class that is derived from execution_context must perform * execution_context::shutdown() followed by * execution_context::destroy(). * * This destruction sequence permits programs to simplify their resource * management by using @c shared_ptr<>. Where an object's lifetime is tied to * the lifetime of a connection (or some other sequence of asynchronous * operations), a @c shared_ptr to the object would be bound into the handlers * for all asynchronous operations associated with it. This works as follows: * * @li When a single connection ends, all associated asynchronous operations * complete. The corresponding handler objects are destroyed, and all @c * shared_ptr references to the objects are destroyed. * * @li To shut down the whole program, the io_context function stop() is called * to terminate any run() calls as soon as possible. 
The io_context destructor * calls @c shutdown() and @c destroy() to destroy all pending handlers, * causing all @c shared_ptr references to all connection objects to be * destroyed. */ class execution_context : private noncopyable { public: class id; class service; public: /// Constructor. ASIO_DECL execution_context(); /// Destructor. ASIO_DECL ~execution_context(); protected: /// Shuts down all services in the context. /** * This function is implemented as follows: * * @li For each service object @c svc in the execution_context set, in * reverse order of the beginning of service object lifetime, performs @c * svc->shutdown(). */ ASIO_DECL void shutdown(); /// Destroys all services in the context. /** * This function is implemented as follows: * * @li For each service object @c svc in the execution_context set, in * reverse order * of the beginning of service object lifetime, performs * delete static_cast(svc). */ ASIO_DECL void destroy(); public: /// Fork-related event notifications. enum fork_event { /// Notify the context that the process is about to fork. fork_prepare, /// Notify the context that the process has forked and is the parent. fork_parent, /// Notify the context that the process has forked and is the child. fork_child }; /// Notify the execution_context of a fork-related event. /** * This function is used to inform the execution_context that the process is * about to fork, or has just forked. This allows the execution_context, and * the services it contains, to perform any necessary housekeeping to ensure * correct operation following a fork. * * This function must not be called while any other execution_context * function, or any function associated with the execution_context's derived * class, is being called in another thread. It is, however, safe to call * this function from within a completion handler, provided no other thread * is accessing the execution_context or its derived class. * * @param event A fork-related event. 
* * @throws asio::system_error Thrown on failure. If the notification * fails the execution_context object should no longer be used and should be * destroyed. * * @par Example * The following code illustrates how to incorporate the notify_fork() * function: * @code my_execution_context.notify_fork(execution_context::fork_prepare); * if (fork() == 0) * { * // This is the child process. * my_execution_context.notify_fork(execution_context::fork_child); * } * else * { * // This is the parent process. * my_execution_context.notify_fork(execution_context::fork_parent); * } @endcode * * @note For each service object @c svc in the execution_context set, * performs svc->notify_fork();. When processing the fork_prepare * event, services are visited in reverse order of the beginning of service * object lifetime. Otherwise, services are visited in order of the beginning * of service object lifetime. */ ASIO_DECL void notify_fork(fork_event event); /// Obtain the service object corresponding to the given type. /** * This function is used to locate a service object that corresponds to the * given service type. If there is no existing implementation of the service, * then the execution_context will create a new instance of the service. * * @param e The execution_context object that owns the service. * * @return The service interface implementing the specified service type. * Ownership of the service interface is not transferred to the caller. */ template friend Service& use_service(execution_context& e); /// Obtain the service object corresponding to the given type. /** * This function is used to locate a service object that corresponds to the * given service type. If there is no existing implementation of the service, * then the io_context will create a new instance of the service. * * @param ioc The io_context object that owns the service. * * @return The service interface implementing the specified service type. 
* Ownership of the service interface is not transferred to the caller. * * @note This overload is preserved for backwards compatibility with services * that inherit from io_context::service. */ template friend Service& use_service(io_context& ioc); #if defined(GENERATING_DOCUMENTATION) /// Creates a service object and adds it to the execution_context. /** * This function is used to add a service to the execution_context. * * @param e The execution_context object that owns the service. * * @param args Zero or more arguments to be passed to the service * constructor. * * @throws asio::service_already_exists Thrown if a service of the * given type is already present in the execution_context. */ template friend Service& make_service(execution_context& e, Args&&... args); #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) template friend Service& make_service(execution_context& e, ASIO_MOVE_ARG(Args)... args); #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) template friend Service& make_service(execution_context& e); #define ASIO_PRIVATE_MAKE_SERVICE_DEF(n) \ template \ friend Service& make_service(execution_context& e, \ ASIO_VARIADIC_MOVE_PARAMS(n)); \ /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_MAKE_SERVICE_DEF) #undef ASIO_PRIVATE_MAKE_SERVICE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) /// (Deprecated: Use make_service().) Add a service object to the /// execution_context. /** * This function is used to add a service to the execution_context. * * @param e The execution_context object that owns the service. * * @param svc The service object. On success, ownership of the service object * is transferred to the execution_context. When the execution_context object * is destroyed, it will destroy the service object by performing: @code * delete static_cast(svc) @endcode * * @throws asio::service_already_exists Thrown if a service of the * given type is already present in the execution_context. 
* * @throws asio::invalid_service_owner Thrown if the service's owning * execution_context is not the execution_context object specified by the * @c e parameter. */ template friend void add_service(execution_context& e, Service* svc); /// Determine if an execution_context contains a specified service type. /** * This function is used to determine whether the execution_context contains a * service object corresponding to the given service type. * * @param e The execution_context object that owns the service. * * @return A boolean indicating whether the execution_context contains the * service. */ template friend bool has_service(execution_context& e); private: // The service registry. asio::detail::service_registry* service_registry_; }; /// Class used to uniquely identify a service. class execution_context::id : private noncopyable { public: /// Constructor. id() {} }; /// Base class for all io_context services. class execution_context::service : private noncopyable { public: /// Get the context object that owns the service. execution_context& context(); protected: /// Constructor. /** * @param owner The execution_context object that owns the service. */ ASIO_DECL service(execution_context& owner); /// Destructor. ASIO_DECL virtual ~service(); private: /// Destroy all user-defined handler objects owned by the service. virtual void shutdown() = 0; /// Handle notification of a fork-related event to perform any necessary /// housekeeping. /** * This function is not a pure virtual so that services only have to * implement it if necessary. The default implementation does nothing. */ ASIO_DECL virtual void notify_fork( execution_context::fork_event event); friend class asio::detail::service_registry; struct key { key() : type_info_(0), id_(0) {} const std::type_info* type_info_; const execution_context::id* id_; } key_; execution_context& owner_; service* next_; }; /// Exception thrown when trying to add a duplicate service to an /// execution_context. 
class service_already_exists : public std::logic_error { public: ASIO_DECL service_already_exists(); }; /// Exception thrown when trying to add a service object to an /// execution_context where the service has a different owner. class invalid_service_owner : public std::logic_error { public: ASIO_DECL invalid_service_owner(); }; namespace detail { // Special derived service id type to keep classes header-file only. template class service_id : public execution_context::id { }; // Special service base class to keep classes header-file only. template class execution_context_service_base : public execution_context::service { public: static service_id id; // Constructor. execution_context_service_base(execution_context& e) : execution_context::service(e) { } }; template service_id execution_context_service_base::id; } // namespace detail } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/execution_context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/execution_context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_EXECUTION_CONTEXT_HPP galera-4-26.4.25/asio/asio/basic_signal_set.hpp000644 000164 177776 00000044232 15107057155 022411 0ustar00jenkinsnogroup000000 000000 // // basic_signal_set.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SIGNAL_SET_HPP #define ASIO_BASIC_SIGNAL_SET_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/signal_set_service.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" namespace asio { /// Provides signal functionality. /** * The basic_signal_set class provides the ability to perform an asynchronous * wait for one or more signals to occur. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * Performing an asynchronous wait: * @code * void handler( * const asio::error_code& error, * int signal_number) * { * if (!error) * { * // A signal occurred. * } * } * * ... * * // Construct a signal set registered for process termination. * asio::signal_set signals(my_context, SIGINT, SIGTERM); * * // Start an asynchronous wait for one of the signals to occur. * signals.async_wait(handler); * @endcode * * @par Queueing of signal notifications * * If a signal is registered with a signal_set, and the signal occurs when * there are no waiting handlers, then the signal notification is queued. The * next async_wait operation on that signal_set will dequeue the notification. * If multiple notifications are queued, subsequent async_wait operations * dequeue them one at a time. Signal notifications are dequeued in order of * ascending signal number. * * If a signal number is removed from a signal_set (using the @c remove or @c * erase member functions) then any queued notifications for that signal are * discarded. 
* * @par Multiple registration of signals * * The same signal number may be registered with different signal_set objects. * When the signal occurs, one handler is called for each signal_set object. * * Note that multiple registration only works for signals that are registered * using Asio. The application must not also register a signal handler using * functions such as @c signal() or @c sigaction(). * * @par Signal masking on POSIX platforms * * POSIX allows signals to be blocked using functions such as @c sigprocmask() * and @c pthread_sigmask(). For signals to be delivered, programs must ensure * that any signals registered using signal_set objects are unblocked in at * least one thread. */ template class basic_signal_set { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// Construct a signal set without adding any signals. /** * This constructor creates a signal set without registering for any signals. * * @param ex The I/O executor that the signal set will use, by default, to * dispatch handlers for any asynchronous operations performed on the * signal set. */ explicit basic_signal_set(const executor_type& ex) : impl_(ex) { } /// Construct a signal set without adding any signals. /** * This constructor creates a signal set without registering for any signals. * * @param context An execution context which provides the I/O executor that * the signal set will use, by default, to dispatch handlers for any * asynchronous operations performed on the signal set. */ template explicit basic_signal_set(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Construct a signal set and add one signal. /** * This constructor creates a signal set and registers for one signal. * * @param ex The I/O executor that the signal set will use, by default, to * dispatch handlers for any asynchronous operations performed on the * signal set. 
* * @param signal_number_1 The signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(ex); * signals.add(signal_number_1); @endcode */ basic_signal_set(const executor_type& ex, int signal_number_1) : impl_(ex) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add one signal. /** * This constructor creates a signal set and registers for one signal. * * @param context An execution context which provides the I/O executor that * the signal set will use, by default, to dispatch handlers for any * asynchronous operations performed on the signal set. * * @param signal_number_1 The signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(context); * signals.add(signal_number_1); @endcode */ template basic_signal_set(ExecutionContext& context, int signal_number_1, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add two signals. /** * This constructor creates a signal set and registers for two signals. * * @param ex The I/O executor that the signal set will use, by default, to * dispatch handlers for any asynchronous operations performed on the * signal set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. 
* * @note This constructor is equivalent to performing: * @code asio::signal_set signals(ex); * signals.add(signal_number_1); * signals.add(signal_number_2); @endcode */ basic_signal_set(const executor_type& ex, int signal_number_1, int signal_number_2) : impl_(ex) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add two signals. /** * This constructor creates a signal set and registers for two signals. * * @param context An execution context which provides the I/O executor that * the signal set will use, by default, to dispatch handlers for any * asynchronous operations performed on the signal set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(context); * signals.add(signal_number_1); * signals.add(signal_number_2); @endcode */ template basic_signal_set(ExecutionContext& context, int signal_number_1, int signal_number_2, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add three signals. /** * This constructor creates a signal set and registers for three signals. * * @param ex The I/O executor that the signal set will use, by default, to * dispatch handlers for any asynchronous operations performed on the * signal set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. 
* * @param signal_number_3 The third signal number to be added. * * @note This constructor is equivalent to performing: * @code asio::signal_set signals(ex); * signals.add(signal_number_1); * signals.add(signal_number_2); * signals.add(signal_number_3); @endcode */ basic_signal_set(const executor_type& ex, int signal_number_1, int signal_number_2, int signal_number_3) : impl_(ex) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_3, ec); asio::detail::throw_error(ec, "add"); } /// Construct a signal set and add three signals. /** * This constructor creates a signal set and registers for three signals. * * @param context An execution context which provides the I/O executor that * the signal set will use, by default, to dispatch handlers for any * asynchronous operations performed on the signal set. * * @param signal_number_1 The first signal number to be added. * * @param signal_number_2 The second signal number to be added. * * @param signal_number_3 The third signal number to be added. 
* * @note This constructor is equivalent to performing: * @code asio::signal_set signals(context); * signals.add(signal_number_1); * signals.add(signal_number_2); * signals.add(signal_number_3); @endcode */ template basic_signal_set(ExecutionContext& context, int signal_number_1, int signal_number_2, int signal_number_3, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number_1, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_2, ec); asio::detail::throw_error(ec, "add"); impl_.get_service().add(impl_.get_implementation(), signal_number_3, ec); asio::detail::throw_error(ec, "add"); } /// Destroys the signal set. /** * This function destroys the signal set, cancelling any outstanding * asynchronous wait operations associated with the signal set as if by * calling @c cancel. */ ~basic_signal_set() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Add a signal to a signal_set. /** * This function adds the specified signal to the set. It has no effect if the * signal is already in the set. * * @param signal_number The signal to be added to the set. * * @throws asio::system_error Thrown on failure. */ void add(int signal_number) { asio::error_code ec; impl_.get_service().add(impl_.get_implementation(), signal_number, ec); asio::detail::throw_error(ec, "add"); } /// Add a signal to a signal_set. /** * This function adds the specified signal to the set. It has no effect if the * signal is already in the set. * * @param signal_number The signal to be added to the set. * * @param ec Set to indicate what error occurred, if any. 
*/ ASIO_SYNC_OP_VOID add(int signal_number, asio::error_code& ec) { impl_.get_service().add(impl_.get_implementation(), signal_number, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Remove a signal from a signal_set. /** * This function removes the specified signal from the set. It has no effect * if the signal is not in the set. * * @param signal_number The signal to be removed from the set. * * @throws asio::system_error Thrown on failure. * * @note Removes any notifications that have been queued for the specified * signal number. */ void remove(int signal_number) { asio::error_code ec; impl_.get_service().remove(impl_.get_implementation(), signal_number, ec); asio::detail::throw_error(ec, "remove"); } /// Remove a signal from a signal_set. /** * This function removes the specified signal from the set. It has no effect * if the signal is not in the set. * * @param signal_number The signal to be removed from the set. * * @param ec Set to indicate what error occurred, if any. * * @note Removes any notifications that have been queued for the specified * signal number. */ ASIO_SYNC_OP_VOID remove(int signal_number, asio::error_code& ec) { impl_.get_service().remove(impl_.get_implementation(), signal_number, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Remove all signals from a signal_set. /** * This function removes all signals from the set. It has no effect if the set * is already empty. * * @throws asio::system_error Thrown on failure. * * @note Removes all queued notifications. */ void clear() { asio::error_code ec; impl_.get_service().clear(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "clear"); } /// Remove all signals from a signal_set. /** * This function removes all signals from the set. It has no effect if the set * is already empty. * * @param ec Set to indicate what error occurred, if any. * * @note Removes all queued notifications. 
*/ ASIO_SYNC_OP_VOID clear(asio::error_code& ec) { impl_.get_service().clear(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Cancel all operations associated with the signal set. /** * This function forces the completion of any pending asynchronous wait * operations against the signal set. The handler for each cancelled * operation will be invoked with the asio::error::operation_aborted * error code. * * Cancellation does not alter the set of registered signals. * * @throws asio::system_error Thrown on failure. * * @note If a registered signal occurred before cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all operations associated with the signal set. /** * This function forces the completion of any pending asynchronous wait * operations against the signal set. The handler for each cancelled * operation will be invoked with the asio::error::operation_aborted * error code. * * Cancellation does not alter the set of registered signals. * * @param ec Set to indicate what error occurred, if any. * * @note If a registered signal occurred before cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Start an asynchronous operation to wait for a signal to be delivered. /** * This function may be used to initiate an asynchronous wait against the * signal set. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li One of the registered signals in the signal set occurs; or * * @li The signal set was cancelled, in which case the handler is passed the * error code asio::error::operation_aborted. * * @param handler The handler to be called when the signal occurs. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * int signal_number // Indicates which signal occurred. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(SignalHandler, void (asio::error_code, int)) async_wait(ASIO_MOVE_ARG(SignalHandler) handler) { return async_initiate( initiate_async_wait(), handler, this); } private: // Disallow copying and assignment. basic_signal_set(const basic_signal_set&) ASIO_DELETED; basic_signal_set& operator=(const basic_signal_set&) ASIO_DELETED; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(SignalHandler) handler, basic_signal_set* self) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a SignalHandler. 
ASIO_SIGNAL_HANDLER_CHECK(SignalHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), handler2.value, self->impl_.get_implementation_executor()); } }; detail::io_object_impl impl_; }; } // namespace asio #endif // ASIO_BASIC_SIGNAL_SET_HPP galera-4-26.4.25/asio/asio/strand.hpp000644 000164 177776 00000022327 15107057155 020414 0ustar00jenkinsnogroup000000 000000 // // strand.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_STRAND_HPP #define ASIO_STRAND_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/strand_executor_service.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Provides serialised function invocation for any executor type. template class strand { public: /// The type of the underlying executor. typedef Executor inner_executor_type; /// Default constructor. /** * This constructor is only valid if the underlying executor type is default * constructible. */ strand() : executor_(), impl_(use_service( executor_.context()).create_implementation()) { } /// Construct a strand for the specified executor. explicit strand(const Executor& e) : executor_(e), impl_(use_service( executor_.context()).create_implementation()) { } /// Copy constructor. strand(const strand& other) ASIO_NOEXCEPT : executor_(other.executor_), impl_(other.impl_) { } /// Converting constructor. /** * This constructor is only valid if the @c OtherExecutor type is convertible * to @c Executor. 
*/ template strand( const strand& other) ASIO_NOEXCEPT : executor_(other.executor_), impl_(other.impl_) { } /// Assignment operator. strand& operator=(const strand& other) ASIO_NOEXCEPT { executor_ = other.executor_; impl_ = other.impl_; return *this; } /// Converting assignment operator. /** * This assignment operator is only valid if the @c OtherExecutor type is * convertible to @c Executor. */ template strand& operator=( const strand& other) ASIO_NOEXCEPT { executor_ = other.executor_; impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. strand(strand&& other) ASIO_NOEXCEPT : executor_(ASIO_MOVE_CAST(Executor)(other.executor_)), impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)) { } /// Converting move constructor. /** * This constructor is only valid if the @c OtherExecutor type is convertible * to @c Executor. */ template strand(strand&& other) ASIO_NOEXCEPT : executor_(ASIO_MOVE_CAST(OtherExecutor)(other)), impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)) { } /// Move assignment operator. strand& operator=(strand&& other) ASIO_NOEXCEPT { executor_ = ASIO_MOVE_CAST(Executor)(other); impl_ = ASIO_MOVE_CAST(implementation_type)(other.impl_); return *this; } /// Converting move assignment operator. /** * This assignment operator is only valid if the @c OtherExecutor type is * convertible to @c Executor. */ template strand& operator=( const strand&& other) ASIO_NOEXCEPT { executor_ = ASIO_MOVE_CAST(OtherExecutor)(other); impl_ = ASIO_MOVE_CAST(implementation_type)(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. ~strand() { } /// Obtain the underlying executor. inner_executor_type get_inner_executor() const ASIO_NOEXCEPT { return executor_; } /// Obtain the underlying execution context. 
execution_context& context() const ASIO_NOEXCEPT { return executor_.context(); } /// Inform the strand that it has some outstanding work to do. /** * The strand delegates this call to its underlying executor. */ void on_work_started() const ASIO_NOEXCEPT { executor_.on_work_started(); } /// Inform the strand that some work is no longer outstanding. /** * The strand delegates this call to its underlying executor. */ void on_work_finished() const ASIO_NOEXCEPT { executor_.on_work_finished(); } /// Request the strand to invoke the given function object. /** * This function is used to ask the strand to execute the given function * object on its underlying executor. The function object will be executed * inside this function if the strand is not otherwise busy and if the * underlying executor's @c dispatch() function is also able to execute the * function before returning. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { detail::strand_executor_service::dispatch(impl_, executor_, ASIO_MOVE_CAST(Function)(f), a); } /// Request the strand to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled by the underlying executor's defer function. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. 
The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { detail::strand_executor_service::post(impl_, executor_, ASIO_MOVE_CAST(Function)(f), a); } /// Request the strand to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object will never be executed inside this function. * Instead, it will be scheduled by the underlying executor's defer function. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const { detail::strand_executor_service::defer(impl_, executor_, ASIO_MOVE_CAST(Function)(f), a); } /// Determine whether the strand is running in the current thread. /** * @return @c true if the current thread is executing a function that was * submitted to the strand using post(), dispatch() or defer(). Otherwise * returns @c false. */ bool running_in_this_thread() const ASIO_NOEXCEPT { return detail::strand_executor_service::running_in_this_thread(impl_); } /// Compare two strands for equality. /** * Two strands are equal if they refer to the same ordered, non-concurrent * state. */ friend bool operator==(const strand& a, const strand& b) ASIO_NOEXCEPT { return a.impl_ == b.impl_; } /// Compare two strands for inequality. /** * Two strands are equal if they refer to the same ordered, non-concurrent * state. 
*/ friend bool operator!=(const strand& a, const strand& b) ASIO_NOEXCEPT { return a.impl_ != b.impl_; } private: Executor executor_; typedef detail::strand_executor_service::implementation_type implementation_type; implementation_type impl_; }; /** @defgroup make_strand asio::make_strand * * @brief The asio::make_strand function creates a @ref strand object for * an executor or execution context. */ /*@{*/ /// Create a @ref strand object for an executor. template inline strand make_strand(const Executor& ex, typename enable_if::value>::type* = 0) { return strand(ex); } /// Create a @ref strand object for an execution context. template inline strand make_strand(ExecutionContext& ctx, typename enable_if< is_convertible::value>::type* = 0) { return strand(ctx.get_executor()); } /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" // If both io_context.hpp and strand.hpp have been included, automatically // include the header file needed for the io_context::strand class. #if !defined(ASIO_NO_EXTENSIONS) # if defined(ASIO_IO_CONTEXT_HPP) # include "asio/io_context_strand.hpp" # endif // defined(ASIO_IO_CONTEXT_HPP) #endif // !defined(ASIO_NO_EXTENSIONS) #endif // ASIO_STRAND_HPP galera-4-26.4.25/asio/asio/post.hpp000644 000164 177776 00000007777 15107057155 020122 0ustar00jenkinsnogroup000000 000000 // // post.hpp // ~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_POST_HPP #define ASIO_POST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the object's associated * executor. The function object is queued for execution, and is never called * from the current thread prior to returning from post(). * * The use of @c post(), rather than @ref defer(), indicates the caller's * preference that the function object be eagerly queued for execution. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). * * @li Obtains the handler's associated executor object @c ex by performing * get_associated_executor(handler). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Performs ex.post(std::move(handler), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( ASIO_MOVE_ARG(CompletionToken) token); /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the specified executor. * The function object is queued for execution, and is never called from the * current thread prior to returning from post(). 
* * The use of @c post(), rather than @ref defer(), indicates the caller's * preference that the function object be eagerly queued for execution. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). * * @li Obtains the handler's associated executor object @c ex1 by performing * get_associated_executor(handler). * * @li Creates a work object @c w by performing make_work(ex1). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Constructs a function object @c f with a function call operator that * performs ex1.dispatch(std::move(handler), alloc) followed by * w.reset(). * * @li Performs Executor(ex).post(std::move(f), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); /// Submits a completion token or function object for execution. /** * @returns post(ctx.get_executor(), forward(token)). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) post( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/post.hpp" #endif // ASIO_POST_HPP galera-4-26.4.25/asio/asio/ip/000755 000164 177776 00000000000 15107057160 017006 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ip/resolver_base.hpp000644 000164 177776 00000007101 15107057155 022355 0ustar00jenkinsnogroup000000 000000 // // ip/resolver_base.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_RESOLVER_BASE_HPP #define ASIO_IP_RESOLVER_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// The resolver_base class is used as a base for the basic_resolver class /// templates to provide a common place to define the flag constants. class resolver_base { public: #if defined(GENERATING_DOCUMENTATION) /// A bitmask type (C++ Std [lib.bitmask.types]). typedef unspecified flags; /// Determine the canonical name of the host specified in the query. static const flags canonical_name = implementation_defined; /// Indicate that returned endpoint is intended for use as a locally bound /// socket endpoint. static const flags passive = implementation_defined; /// Host name should be treated as a numeric string defining an IPv4 or IPv6 /// address and no name resolution should be attempted. static const flags numeric_host = implementation_defined; /// Service name should be treated as a numeric string defining a port number /// and no name resolution should be attempted. static const flags numeric_service = implementation_defined; /// If the query protocol family is specified as IPv6, return IPv4-mapped /// IPv6 addresses on finding no IPv6 addresses. static const flags v4_mapped = implementation_defined; /// If used with v4_mapped, return all matching IPv6 and IPv4 addresses. static const flags all_matching = implementation_defined; /// Only return IPv4 addresses if a non-loopback IPv4 address is configured /// for the system. Only return IPv6 addresses if a non-loopback IPv6 address /// is configured for the system. 
static const flags address_configured = implementation_defined; #else enum flags { canonical_name = ASIO_OS_DEF(AI_CANONNAME), passive = ASIO_OS_DEF(AI_PASSIVE), numeric_host = ASIO_OS_DEF(AI_NUMERICHOST), numeric_service = ASIO_OS_DEF(AI_NUMERICSERV), v4_mapped = ASIO_OS_DEF(AI_V4MAPPED), all_matching = ASIO_OS_DEF(AI_ALL), address_configured = ASIO_OS_DEF(AI_ADDRCONFIG) }; // Implement bitmask operations as shown in C++ Std [lib.bitmask.types]. friend flags operator&(flags x, flags y) { return static_cast( static_cast(x) & static_cast(y)); } friend flags operator|(flags x, flags y) { return static_cast( static_cast(x) | static_cast(y)); } friend flags operator^(flags x, flags y) { return static_cast( static_cast(x) ^ static_cast(y)); } friend flags operator~(flags x) { return static_cast(~static_cast(x)); } friend flags& operator&=(flags& x, flags y) { x = x & y; return x; } friend flags& operator|=(flags& x, flags y) { x = x | y; return x; } friend flags& operator^=(flags& x, flags y) { x = x ^ y; return x; } #endif protected: /// Protected destructor to prevent deletion through this type. ~resolver_base() { } }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_RESOLVER_BASE_HPP galera-4-26.4.25/asio/asio/ip/address_v6_range.hpp000644 000164 177776 00000006112 15107057155 022737 0ustar00jenkinsnogroup000000 000000 // // ip/address_v6_range.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V6_RANGE_HPP #define ASIO_IP_ADDRESS_V6_RANGE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address_v6_iterator.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template class basic_address_range; /// Represents a range of IPv6 addresses. /** * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template <> class basic_address_range { public: /// The type of an iterator that points into the range. typedef basic_address_iterator iterator; /// Construct an empty range. basic_address_range() ASIO_NOEXCEPT : begin_(address_v6()), end_(address_v6()) { } /// Construct an range that represents the given range of addresses. explicit basic_address_range(const iterator& first, const iterator& last) ASIO_NOEXCEPT : begin_(first), end_(last) { } /// Copy constructor. basic_address_range(const basic_address_range& other) ASIO_NOEXCEPT : begin_(other.begin_), end_(other.end_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_address_range(basic_address_range&& other) ASIO_NOEXCEPT : begin_(ASIO_MOVE_CAST(iterator)(other.begin_)), end_(ASIO_MOVE_CAST(iterator)(other.end_)) { } #endif // defined(ASIO_HAS_MOVE) /// Assignment operator. basic_address_range& operator=( const basic_address_range& other) ASIO_NOEXCEPT { begin_ = other.begin_; end_ = other.end_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move assignment operator. basic_address_range& operator=( basic_address_range&& other) ASIO_NOEXCEPT { begin_ = ASIO_MOVE_CAST(iterator)(other.begin_); end_ = ASIO_MOVE_CAST(iterator)(other.end_); return *this; } #endif // defined(ASIO_HAS_MOVE) /// Obtain an iterator that points to the start of the range. 
iterator begin() const ASIO_NOEXCEPT { return begin_; } /// Obtain an iterator that points to the end of the range. iterator end() const ASIO_NOEXCEPT { return end_; } /// Determine whether the range is empty. bool empty() const ASIO_NOEXCEPT { return begin_ == end_; } /// Find an address in the range. iterator find(const address_v6& addr) const ASIO_NOEXCEPT { return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_; } private: iterator begin_; iterator end_; }; /// Represents a range of IPv6 addresses. typedef basic_address_range address_v6_range; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ADDRESS_V6_RANGE_HPP galera-4-26.4.25/asio/asio/ip/multicast.hpp000644 000164 177776 00000012000 15107057155 021521 0ustar00jenkinsnogroup000000 000000 // // ip/multicast.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_MULTICAST_HPP #define ASIO_IP_MULTICAST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/ip/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace multicast { /// Socket option to join a multicast group on a specified interface. /** * Implements the IPPROTO_IP/IP_ADD_MEMBERSHIP socket option. * * @par Examples * Setting the option to join a multicast group: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::address multicast_address = * asio::ip::address::from_string("225.0.0.1"); * asio::ip::multicast::join_group option(multicast_address); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined join_group; #else typedef asio::ip::detail::socket_option::multicast_request< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_ADD_MEMBERSHIP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_JOIN_GROUP)> join_group; #endif /// Socket option to leave a multicast group on a specified interface. /** * Implements the IPPROTO_IP/IP_DROP_MEMBERSHIP socket option. * * @par Examples * Setting the option to leave a multicast group: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::address multicast_address = * asio::ip::address::from_string("225.0.0.1"); * asio::ip::multicast::leave_group option(multicast_address); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined leave_group; #else typedef asio::ip::detail::socket_option::multicast_request< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_DROP_MEMBERSHIP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_LEAVE_GROUP)> leave_group; #endif /// Socket option for local interface to use for outgoing multicast packets. /** * Implements the IPPROTO_IP/IP_MULTICAST_IF socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::address_v4 local_interface = * asio::ip::address_v4::from_string("1.2.3.4"); * asio::ip::multicast::outbound_interface option(local_interface); * socket.set_option(option); * @endcode * * @par Concepts: * SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined outbound_interface; #else typedef asio::ip::detail::socket_option::network_interface< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_IF), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_IF)> outbound_interface; #endif /// Socket option for time-to-live associated with outgoing multicast packets. /** * Implements the IPPROTO_IP/IP_MULTICAST_TTL socket option. 
* * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::multicast::hops option(4); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::multicast::hops option; * socket.get_option(option); * int ttl = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined hops; #else typedef asio::ip::detail::socket_option::multicast_hops< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_TTL), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_HOPS)> hops; #endif /// Socket option determining whether outgoing multicast packets will be /// received on the same socket if it is a member of the multicast group. /** * Implements the IPPROTO_IP/IP_MULTICAST_LOOP socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::multicast::enable_loopback option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::multicast::enable_loopback option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined enable_loopback; #else typedef asio::ip::detail::socket_option::multicast_enable_loopback< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_MULTICAST_LOOP), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_MULTICAST_LOOP)> enable_loopback; #endif } // namespace multicast } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_MULTICAST_HPP galera-4-26.4.25/asio/asio/ip/impl/000755 000164 177776 00000000000 15107057160 017747 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ip/impl/network_v4.ipp000644 000164 177776 00000012350 15107057155 022570 0ustar00jenkinsnogroup000000 000000 // // ip/impl/network_v4.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_NETWORK_V4_IPP #define ASIO_IP_IMPL_NETWORK_V4_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include "asio/error.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/network_v4.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { network_v4::network_v4(const address_v4& addr, unsigned short prefix_len) : address_(addr), prefix_length_(prefix_len) { if (prefix_len > 32) { std::out_of_range ex("prefix length too large"); asio::detail::throw_exception(ex); } } network_v4::network_v4(const address_v4& addr, const address_v4& mask) : address_(addr), prefix_length_(0) { address_v4::bytes_type mask_bytes = mask.to_bytes(); bool finished = false; for (std::size_t i = 0; i < mask_bytes.size(); 
++i) { if (finished) { if (mask_bytes[i]) { std::invalid_argument ex("non-contiguous netmask"); asio::detail::throw_exception(ex); } continue; } else { switch (mask_bytes[i]) { case 255: prefix_length_ += 8; break; case 254: // prefix_length_ += 7 prefix_length_ += 1; case 252: // prefix_length_ += 6 prefix_length_ += 1; case 248: // prefix_length_ += 5 prefix_length_ += 1; case 240: // prefix_length_ += 4 prefix_length_ += 1; case 224: // prefix_length_ += 3 prefix_length_ += 1; case 192: // prefix_length_ += 2 prefix_length_ += 1; case 128: // prefix_length_ += 1 prefix_length_ += 1; case 0: // nbits += 0 finished = true; break; default: std::out_of_range ex("non-contiguous netmask"); asio::detail::throw_exception(ex); } } } } address_v4 network_v4::netmask() const ASIO_NOEXCEPT { uint32_t nmbits = 0xffffffff; if (prefix_length_ == 0) nmbits = 0; else nmbits = nmbits << (32 - prefix_length_); return address_v4(nmbits); } address_v4_range network_v4::hosts() const ASIO_NOEXCEPT { return is_host() ? address_v4_range(address_, address_v4(address_.to_uint() + 1)) : address_v4_range(address_v4(network().to_uint() + 1), broadcast()); } bool network_v4::is_subnet_of(const network_v4& other) const { if (other.prefix_length_ >= prefix_length_) return false; // Only real subsets are allowed. const network_v4 me(address_, other.prefix_length_); return other.canonical() == me.canonical(); } std::string network_v4::to_string() const { asio::error_code ec; std::string addr = to_string(ec); asio::detail::throw_error(ec); return addr; } std::string network_v4::to_string(asio::error_code& ec) const { using namespace std; // For sprintf. 
ec = asio::error_code(); char prefix_len[16]; #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(prefix_len, sizeof(prefix_len), "/%u", prefix_length_); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(prefix_len, "/%u", prefix_length_); #endif // defined(ASIO_HAS_SECURE_RTL) return address_.to_string() + prefix_len; } network_v4 make_network_v4(const char* str) { return make_network_v4(std::string(str)); } network_v4 make_network_v4(const char* str, asio::error_code& ec) { return make_network_v4(std::string(str), ec); } network_v4 make_network_v4(const std::string& str) { asio::error_code ec; network_v4 net = make_network_v4(str, ec); asio::detail::throw_error(ec); return net; } network_v4 make_network_v4(const std::string& str, asio::error_code& ec) { std::string::size_type pos = str.find_first_of("/"); if (pos == std::string::npos) { ec = asio::error::invalid_argument; return network_v4(); } if (pos == str.size() - 1) { ec = asio::error::invalid_argument; return network_v4(); } std::string::size_type end = str.find_first_not_of("0123456789", pos + 1); if (end != std::string::npos) { ec = asio::error::invalid_argument; return network_v4(); } const address_v4 addr = make_address_v4(str.substr(0, pos), ec); if (ec) return network_v4(); const int prefix_len = std::atoi(str.substr(pos + 1).c_str()); if (prefix_len < 0 || prefix_len > 32) { ec = asio::error::invalid_argument; return network_v4(); } return network_v4(addr, static_cast(prefix_len)); } #if defined(ASIO_HAS_STRING_VIEW) network_v4 make_network_v4(string_view str) { return make_network_v4(static_cast(str)); } network_v4 make_network_v4(string_view str, asio::error_code& ec) { return make_network_v4(static_cast(str), ec); } #endif // defined(ASIO_HAS_STRING_VIEW) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_NETWORK_V4_IPP galera-4-26.4.25/asio/asio/ip/impl/host_name.ipp000644 000164 177776 00000002410 15107057155 022437 0ustar00jenkinsnogroup000000 000000 // // 
ip/impl/host_name.ipp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_HOST_NAME_IPP #define ASIO_IP_IMPL_HOST_NAME_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/ip/host_name.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { std::string host_name() { char name[1024]; asio::error_code ec; if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0) { asio::detail::throw_error(ec); return std::string(); } return std::string(name); } std::string host_name(asio::error_code& ec) { char name[1024]; if (asio::detail::socket_ops::gethostname(name, sizeof(name), ec) != 0) return std::string(); return std::string(name); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_HOST_NAME_IPP galera-4-26.4.25/asio/asio/ip/impl/address.ipp000644 000164 177776 00000012212 15107057155 022110 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address.ipp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_IPP #define ASIO_IP_IMPL_ADDRESS_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/ip/address.hpp" #include "asio/ip/bad_address_cast.hpp" #include "asio/system_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address::address() ASIO_NOEXCEPT : type_(ipv4), ipv4_address_(), ipv6_address_() { } address::address( const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT : type_(ipv4), ipv4_address_(ipv4_address), ipv6_address_() { } address::address( const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT : type_(ipv6), ipv4_address_(), ipv6_address_(ipv6_address) { } address::address(const address& other) ASIO_NOEXCEPT : type_(other.type_), ipv4_address_(other.ipv4_address_), ipv6_address_(other.ipv6_address_) { } #if defined(ASIO_HAS_MOVE) address::address(address&& other) ASIO_NOEXCEPT : type_(other.type_), ipv4_address_(other.ipv4_address_), ipv6_address_(other.ipv6_address_) { } #endif // defined(ASIO_HAS_MOVE) address& address::operator=(const address& other) ASIO_NOEXCEPT { type_ = other.type_; ipv4_address_ = other.ipv4_address_; ipv6_address_ = other.ipv6_address_; return *this; } #if defined(ASIO_HAS_MOVE) address& address::operator=(address&& other) ASIO_NOEXCEPT { type_ = other.type_; ipv4_address_ = other.ipv4_address_; ipv6_address_ = other.ipv6_address_; return *this; } #endif // defined(ASIO_HAS_MOVE) address& address::operator=( const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT { type_ = ipv4; ipv4_address_ = ipv4_address; ipv6_address_ = asio::ip::address_v6(); return *this; } address& address::operator=( const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT { type_ 
= ipv6; ipv4_address_ = asio::ip::address_v4(); ipv6_address_ = ipv6_address; return *this; } address make_address(const char* str) { asio::error_code ec; address addr = make_address(str, ec); asio::detail::throw_error(ec); return addr; } address make_address(const char* str, asio::error_code& ec) ASIO_NOEXCEPT { asio::ip::address_v6 ipv6_address = asio::ip::make_address_v6(str, ec); if (!ec) return address(ipv6_address); asio::ip::address_v4 ipv4_address = asio::ip::make_address_v4(str, ec); if (!ec) return address(ipv4_address); return address(); } address make_address(const std::string& str) { return make_address(str.c_str()); } address make_address(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address(str.c_str(), ec); } #if defined(ASIO_HAS_STRING_VIEW) address make_address(string_view str) { return make_address(static_cast(str)); } address make_address(string_view str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address(static_cast(str), ec); } #endif // defined(ASIO_HAS_STRING_VIEW) asio::ip::address_v4 address::to_v4() const { if (type_ != ipv4) { bad_address_cast ex; asio::detail::throw_exception(ex); } return ipv4_address_; } asio::ip::address_v6 address::to_v6() const { if (type_ != ipv6) { bad_address_cast ex; asio::detail::throw_exception(ex); } return ipv6_address_; } std::string address::to_string() const { if (type_ == ipv6) return ipv6_address_.to_string(); return ipv4_address_.to_string(); } #if !defined(ASIO_NO_DEPRECATED) std::string address::to_string(asio::error_code& ec) const { if (type_ == ipv6) return ipv6_address_.to_string(ec); return ipv4_address_.to_string(ec); } #endif // !defined(ASIO_NO_DEPRECATED) bool address::is_loopback() const ASIO_NOEXCEPT { return (type_ == ipv4) ? ipv4_address_.is_loopback() : ipv6_address_.is_loopback(); } bool address::is_unspecified() const ASIO_NOEXCEPT { return (type_ == ipv4) ? 
ipv4_address_.is_unspecified() : ipv6_address_.is_unspecified(); } bool address::is_multicast() const ASIO_NOEXCEPT { return (type_ == ipv4) ? ipv4_address_.is_multicast() : ipv6_address_.is_multicast(); } bool operator==(const address& a1, const address& a2) ASIO_NOEXCEPT { if (a1.type_ != a2.type_) return false; if (a1.type_ == address::ipv6) return a1.ipv6_address_ == a2.ipv6_address_; return a1.ipv4_address_ == a2.ipv4_address_; } bool operator<(const address& a1, const address& a2) ASIO_NOEXCEPT { if (a1.type_ < a2.type_) return true; if (a1.type_ > a2.type_) return false; if (a1.type_ == address::ipv6) return a1.ipv6_address_ < a2.ipv6_address_; return a1.ipv4_address_ < a2.ipv4_address_; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_IPP galera-4-26.4.25/asio/asio/ip/impl/address_v6.hpp000644 000164 177776 00000003016 15107057155 022524 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address_v6.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V6_HPP #define ASIO_IP_IMPL_ADDRESS_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { #if !defined(ASIO_NO_DEPRECATED) inline address_v6 address_v6::from_string(const char* str) { return asio::ip::make_address_v6(str); } inline address_v6 address_v6::from_string( const char* str, asio::error_code& ec) { return asio::ip::make_address_v6(str, ec); } inline address_v6 address_v6::from_string(const std::string& str) { return asio::ip::make_address_v6(str); } inline address_v6 address_v6::from_string( const std::string& str, asio::error_code& ec) { return asio::ip::make_address_v6(str, ec); } #endif // !defined(ASIO_NO_DEPRECATED) template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v6& addr) { return os << addr.to_string().c_str(); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_V6_HPP galera-4-26.4.25/asio/asio/ip/impl/network_v6.hpp000644 000164 177776 00000002432 15107057155 022571 0ustar00jenkinsnogroup000000 000000 // // ip/impl/network_v6.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_NETWORK_V6_HPP #define ASIO_IP_IMPL_NETWORK_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const network_v6& addr) { asio::error_code ec; std::string s = addr.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_NETWORK_V6_HPP galera-4-26.4.25/asio/asio/ip/impl/address_v4.ipp000644 000164 177776 00000012155 15107057155 022527 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address_v4.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V4_IPP #define ASIO_IP_IMPL_ADDRESS_V4_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/error.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/address_v4.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address_v4::address_v4(const address_v4::bytes_type& bytes) { #if UCHAR_MAX > 0xFF if (bytes[0] > 0xFF || bytes[1] > 0xFF || bytes[2] > 0xFF || bytes[3] > 0xFF) { std::out_of_range ex("address_v4 from bytes_type"); asio::detail::throw_exception(ex); } #endif // UCHAR_MAX > 0xFF using namespace std; // For memcpy. memcpy(&addr_.s_addr, bytes.data(), 4); } address_v4::address_v4(address_v4::uint_type addr) { if ((std::numeric_limits::max)() > 0xFFFFFFFF) { std::out_of_range ex("address_v4 from unsigned integer"); asio::detail::throw_exception(ex); } addr_.s_addr = asio::detail::socket_ops::host_to_network_long( static_cast(addr)); } address_v4::bytes_type address_v4::to_bytes() const ASIO_NOEXCEPT { using namespace std; // For memcpy. 
bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), &addr_.s_addr, 4); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, &addr_.s_addr, 4); #endif // defined(ASIO_HAS_STD_ARRAY) return bytes; } address_v4::uint_type address_v4::to_uint() const ASIO_NOEXCEPT { return asio::detail::socket_ops::network_to_host_long(addr_.s_addr); } #if !defined(ASIO_NO_DEPRECATED) unsigned long address_v4::to_ulong() const { return asio::detail::socket_ops::network_to_host_long(addr_.s_addr); } #endif // !defined(ASIO_NO_DEPRECATED) std::string address_v4::to_string() const { asio::error_code ec; char addr_str[asio::detail::max_addr_v4_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET), &addr_, addr_str, asio::detail::max_addr_v4_str_len, 0, ec); if (addr == 0) asio::detail::throw_error(ec); return addr; } #if !defined(ASIO_NO_DEPRECATED) std::string address_v4::to_string(asio::error_code& ec) const { char addr_str[asio::detail::max_addr_v4_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET), &addr_, addr_str, asio::detail::max_addr_v4_str_len, 0, ec); if (addr == 0) return std::string(); return addr; } #endif // !defined(ASIO_NO_DEPRECATED) bool address_v4::is_loopback() const ASIO_NOEXCEPT { return (to_uint() & 0xFF000000) == 0x7F000000; } bool address_v4::is_unspecified() const ASIO_NOEXCEPT { return to_uint() == 0; } #if !defined(ASIO_NO_DEPRECATED) bool address_v4::is_class_a() const { return (to_uint() & 0x80000000) == 0; } bool address_v4::is_class_b() const { return (to_uint() & 0xC0000000) == 0x80000000; } bool address_v4::is_class_c() const { return (to_uint() & 0xE0000000) == 0xC0000000; } #endif // !defined(ASIO_NO_DEPRECATED) bool address_v4::is_multicast() const ASIO_NOEXCEPT { return (to_uint() & 0xF0000000) == 0xE0000000; } #if !defined(ASIO_NO_DEPRECATED) address_v4 address_v4::broadcast(const address_v4& addr, const address_v4& mask) { return address_v4(addr.to_uint() | 
(mask.to_uint() ^ 0xFFFFFFFF)); } address_v4 address_v4::netmask(const address_v4& addr) { if (addr.is_class_a()) return address_v4(0xFF000000); if (addr.is_class_b()) return address_v4(0xFFFF0000); if (addr.is_class_c()) return address_v4(0xFFFFFF00); return address_v4(0xFFFFFFFF); } #endif // !defined(ASIO_NO_DEPRECATED) address_v4 make_address_v4(const char* str) { asio::error_code ec; address_v4 addr = make_address_v4(str, ec); asio::detail::throw_error(ec); return addr; } address_v4 make_address_v4(const char* str, asio::error_code& ec) ASIO_NOEXCEPT { address_v4::bytes_type bytes; if (asio::detail::socket_ops::inet_pton( ASIO_OS_DEF(AF_INET), str, &bytes, 0, ec) <= 0) return address_v4(); return address_v4(bytes); } address_v4 make_address_v4(const std::string& str) { return make_address_v4(str.c_str()); } address_v4 make_address_v4(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address_v4(str.c_str(), ec); } #if defined(ASIO_HAS_STRING_VIEW) address_v4 make_address_v4(string_view str) { return make_address_v4(static_cast(str)); } address_v4 make_address_v4(string_view str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address_v4(static_cast(str), ec); } #endif // defined(ASIO_HAS_STRING_VIEW) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_V4_IPP galera-4-26.4.25/asio/asio/ip/impl/network_v4.hpp000644 000164 177776 00000002545 15107057155 022574 0ustar00jenkinsnogroup000000 000000 // // ip/impl/network_v4.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_NETWORK_V4_HPP #define ASIO_IP_IMPL_NETWORK_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const network_v4& addr) { asio::error_code ec; std::string s = addr.to_string(ec); if (ec) { if (os.exceptions() & std::basic_ostream::failbit) asio::detail::throw_error(ec); else os.setstate(std::basic_ostream::failbit); } else for (std::string::iterator i = s.begin(); i != s.end(); ++i) os << os.widen(*i); return os; } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_NETWORK_V4_HPP galera-4-26.4.25/asio/asio/ip/impl/address_v6.ipp000644 000164 177776 00000023242 15107057155 022530 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address_v6.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V6_IPP #define ASIO_IP_IMPL_ADDRESS_V6_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/error.hpp" #include "asio/ip/address_v6.hpp" #include "asio/ip/bad_address_cast.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { address_v6::address_v6() ASIO_NOEXCEPT : addr_(), scope_id_(0) { } address_v6::address_v6(const address_v6::bytes_type& bytes, unsigned long scope) : scope_id_(scope) { #if UCHAR_MAX > 0xFF for (std::size_t i = 0; i < bytes.size(); ++i) { if (bytes[i] > 0xFF) { std::out_of_range ex("address_v6 from bytes_type"); asio::detail::throw_exception(ex); } } #endif // UCHAR_MAX > 0xFF using namespace std; // For memcpy. memcpy(addr_.s6_addr, bytes.data(), 16); } address_v6::address_v6(const address_v6& other) ASIO_NOEXCEPT : addr_(other.addr_), scope_id_(other.scope_id_) { } #if defined(ASIO_HAS_MOVE) address_v6::address_v6(address_v6&& other) ASIO_NOEXCEPT : addr_(other.addr_), scope_id_(other.scope_id_) { } #endif // defined(ASIO_HAS_MOVE) address_v6& address_v6::operator=(const address_v6& other) ASIO_NOEXCEPT { addr_ = other.addr_; scope_id_ = other.scope_id_; return *this; } #if defined(ASIO_HAS_MOVE) address_v6& address_v6::operator=(address_v6&& other) ASIO_NOEXCEPT { addr_ = other.addr_; scope_id_ = other.scope_id_; return *this; } #endif // defined(ASIO_HAS_MOVE) address_v6::bytes_type address_v6::to_bytes() const ASIO_NOEXCEPT { using namespace std; // For memcpy. 
bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), addr_.s6_addr, 16); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, addr_.s6_addr, 16); #endif // defined(ASIO_HAS_STD_ARRAY) return bytes; } std::string address_v6::to_string() const { asio::error_code ec; char addr_str[asio::detail::max_addr_v6_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET6), &addr_, addr_str, asio::detail::max_addr_v6_str_len, scope_id_, ec); if (addr == 0) asio::detail::throw_error(ec); return addr; } #if !defined(ASIO_NO_DEPRECATED) std::string address_v6::to_string(asio::error_code& ec) const { char addr_str[asio::detail::max_addr_v6_str_len]; const char* addr = asio::detail::socket_ops::inet_ntop( ASIO_OS_DEF(AF_INET6), &addr_, addr_str, asio::detail::max_addr_v6_str_len, scope_id_, ec); if (addr == 0) return std::string(); return addr; } address_v4 address_v6::to_v4() const { if (!is_v4_mapped() && !is_v4_compatible()) { bad_address_cast ex; asio::detail::throw_exception(ex); } address_v4::bytes_type v4_bytes = { { addr_.s6_addr[12], addr_.s6_addr[13], addr_.s6_addr[14], addr_.s6_addr[15] } }; return address_v4(v4_bytes); } #endif // !defined(ASIO_NO_DEPRECATED) bool address_v6::is_loopback() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 1)); } bool address_v6::is_unspecified() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 
0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && (addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && (addr_.s6_addr[15] == 0)); } bool address_v6::is_link_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0x80)); } bool address_v6::is_site_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xfe) && ((addr_.s6_addr[1] & 0xc0) == 0xc0)); } bool address_v6::is_v4_mapped() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0xff) && (addr_.s6_addr[11] == 0xff)); } #if !defined(ASIO_NO_DEPRECATED) bool address_v6::is_v4_compatible() const { return ((addr_.s6_addr[0] == 0) && (addr_.s6_addr[1] == 0) && (addr_.s6_addr[2] == 0) && (addr_.s6_addr[3] == 0) && (addr_.s6_addr[4] == 0) && (addr_.s6_addr[5] == 0) && (addr_.s6_addr[6] == 0) && (addr_.s6_addr[7] == 0) && (addr_.s6_addr[8] == 0) && (addr_.s6_addr[9] == 0) && (addr_.s6_addr[10] == 0) && (addr_.s6_addr[11] == 0) && !((addr_.s6_addr[12] == 0) && (addr_.s6_addr[13] == 0) && (addr_.s6_addr[14] == 0) && ((addr_.s6_addr[15] == 0) || (addr_.s6_addr[15] == 1)))); } #endif // !defined(ASIO_NO_DEPRECATED) bool address_v6::is_multicast() const ASIO_NOEXCEPT { return (addr_.s6_addr[0] == 0xff); } bool address_v6::is_multicast_global() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x0e)); } bool address_v6::is_multicast_link_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x02)); } bool address_v6::is_multicast_node_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x01)); 
} bool address_v6::is_multicast_org_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x08)); } bool address_v6::is_multicast_site_local() const ASIO_NOEXCEPT { return ((addr_.s6_addr[0] == 0xff) && ((addr_.s6_addr[1] & 0x0f) == 0x05)); } bool operator==(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { using namespace std; // For memcmp. return memcmp(&a1.addr_, &a2.addr_, sizeof(asio::detail::in6_addr_type)) == 0 && a1.scope_id_ == a2.scope_id_; } bool operator<(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { using namespace std; // For memcmp. int memcmp_result = memcmp(&a1.addr_, &a2.addr_, sizeof(asio::detail::in6_addr_type)); if (memcmp_result < 0) return true; if (memcmp_result > 0) return false; return a1.scope_id_ < a2.scope_id_; } address_v6 address_v6::loopback() ASIO_NOEXCEPT { address_v6 tmp; tmp.addr_.s6_addr[15] = 1; return tmp; } #if !defined(ASIO_NO_DEPRECATED) address_v6 address_v6::v4_mapped(const address_v4& addr) { address_v4::bytes_type v4_bytes = addr.to_bytes(); bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } }; return address_v6(v6_bytes); } address_v6 address_v6::v4_compatible(const address_v4& addr) { address_v4::bytes_type v4_bytes = addr.to_bytes(); bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } }; return address_v6(v6_bytes); } #endif // !defined(ASIO_NO_DEPRECATED) address_v6 make_address_v6(const char* str) { asio::error_code ec; address_v6 addr = make_address_v6(str, ec); asio::detail::throw_error(ec); return addr; } address_v6 make_address_v6(const char* str, asio::error_code& ec) ASIO_NOEXCEPT { address_v6::bytes_type bytes; unsigned long scope_id = 0; if (asio::detail::socket_ops::inet_pton( ASIO_OS_DEF(AF_INET6), str, &bytes[0], &scope_id, ec) <= 0) return address_v6(); return address_v6(bytes, scope_id); } address_v6 
make_address_v6(const std::string& str) { return make_address_v6(str.c_str()); } address_v6 make_address_v6(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address_v6(str.c_str(), ec); } #if defined(ASIO_HAS_STRING_VIEW) address_v6 make_address_v6(string_view str) { return make_address_v6(static_cast(str)); } address_v6 make_address_v6(string_view str, asio::error_code& ec) ASIO_NOEXCEPT { return make_address_v6(static_cast(str), ec); } #endif // defined(ASIO_HAS_STRING_VIEW) address_v4 make_address_v4( v4_mapped_t, const address_v6& v6_addr) { if (!v6_addr.is_v4_mapped()) { bad_address_cast ex; asio::detail::throw_exception(ex); } address_v6::bytes_type v6_bytes = v6_addr.to_bytes(); address_v4::bytes_type v4_bytes = { { v6_bytes[12], v6_bytes[13], v6_bytes[14], v6_bytes[15] } }; return address_v4(v4_bytes); } address_v6 make_address_v6( v4_mapped_t, const address_v4& v4_addr) { address_v4::bytes_type v4_bytes = v4_addr.to_bytes(); address_v6::bytes_type v6_bytes = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, v4_bytes[0], v4_bytes[1], v4_bytes[2], v4_bytes[3] } }; return address_v6(v6_bytes); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_ADDRESS_V6_IPP galera-4-26.4.25/asio/asio/ip/impl/basic_endpoint.hpp000644 000164 177776 00000002155 15107057155 023450 0ustar00jenkinsnogroup000000 000000 // // ip/impl/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_BASIC_ENDPOINT_HPP #define ASIO_IP_IMPL_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint) { asio::ip::detail::endpoint tmp_ep(endpoint.address(), endpoint.port()); return os << tmp_ep.to_string().c_str(); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_BASIC_ENDPOINT_HPP galera-4-26.4.25/asio/asio/ip/impl/network_v6.ipp000644 000164 177776 00000011136 15107057155 022573 0ustar00jenkinsnogroup000000 000000 // // ip/impl/network_v6.ipp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_NETWORK_V6_IPP #define ASIO_IP_IMPL_NETWORK_V6_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include "asio/error.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/network_v6.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { network_v6::network_v6(const address_v6& addr, unsigned short prefix_len) : address_(addr), prefix_length_(prefix_len) { if (prefix_len > 128) { std::out_of_range ex("prefix length too large"); asio::detail::throw_exception(ex); } } ASIO_DECL address_v6 network_v6::network() const ASIO_NOEXCEPT { address_v6::bytes_type bytes(address_.to_bytes()); for (std::size_t i = 0; i < 16; ++i) { if (prefix_length_ <= i * 8) bytes[i] = 0; else if (prefix_length_ < (i + 1) * 8) bytes[i] &= 0xFF00 >> (prefix_length_ % 8); } return address_v6(bytes, address_.scope_id()); } address_v6_range network_v6::hosts() const ASIO_NOEXCEPT { address_v6::bytes_type begin_bytes(address_.to_bytes()); address_v6::bytes_type end_bytes(address_.to_bytes()); for (std::size_t i = 0; i < 16; ++i) { if (prefix_length_ <= i * 8) { begin_bytes[i] = 0; end_bytes[i] = 0xFF; } else if (prefix_length_ < (i + 1) * 8) { begin_bytes[i] &= 0xFF00 >> (prefix_length_ % 8); end_bytes[i] |= 0xFF >> (prefix_length_ % 8); } } return address_v6_range( address_v6_iterator(address_v6(begin_bytes, address_.scope_id())), ++address_v6_iterator(address_v6(end_bytes, address_.scope_id()))); } bool network_v6::is_subnet_of(const network_v6& other) const { if (other.prefix_length_ >= prefix_length_) return false; // Only real subsets are allowed. 
const network_v6 me(address_, other.prefix_length_); return other.canonical() == me.canonical(); } std::string network_v6::to_string() const { asio::error_code ec; std::string addr = to_string(ec); asio::detail::throw_error(ec); return addr; } std::string network_v6::to_string(asio::error_code& ec) const { using namespace std; // For sprintf. ec = asio::error_code(); char prefix_len[16]; #if defined(ASIO_HAS_SECURE_RTL) sprintf_s(prefix_len, sizeof(prefix_len), "/%u", prefix_length_); #else // defined(ASIO_HAS_SECURE_RTL) sprintf(prefix_len, "/%u", prefix_length_); #endif // defined(ASIO_HAS_SECURE_RTL) return address_.to_string() + prefix_len; } network_v6 make_network_v6(const char* str) { return make_network_v6(std::string(str)); } network_v6 make_network_v6(const char* str, asio::error_code& ec) { return make_network_v6(std::string(str), ec); } network_v6 make_network_v6(const std::string& str) { asio::error_code ec; network_v6 net = make_network_v6(str, ec); asio::detail::throw_error(ec); return net; } network_v6 make_network_v6(const std::string& str, asio::error_code& ec) { std::string::size_type pos = str.find_first_of("/"); if (pos == std::string::npos) { ec = asio::error::invalid_argument; return network_v6(); } if (pos == str.size() - 1) { ec = asio::error::invalid_argument; return network_v6(); } std::string::size_type end = str.find_first_not_of("0123456789", pos + 1); if (end != std::string::npos) { ec = asio::error::invalid_argument; return network_v6(); } const address_v6 addr = make_address_v6(str.substr(0, pos), ec); if (ec) return network_v6(); const int prefix_len = std::atoi(str.substr(pos + 1).c_str()); if (prefix_len < 0 || prefix_len > 128) { ec = asio::error::invalid_argument; return network_v6(); } return network_v6(addr, static_cast(prefix_len)); } #if defined(ASIO_HAS_STRING_VIEW) network_v6 make_network_v6(string_view str) { return make_network_v6(static_cast(str)); } network_v6 make_network_v6(string_view str, asio::error_code& ec) { 
return make_network_v6(static_cast(str), ec); } #endif // defined(ASIO_HAS_STRING_VIEW) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_IMPL_NETWORK_V6_IPP galera-4-26.4.25/asio/asio/ip/impl/address.hpp000644 000164 177776 00000002730 15107057155 022113 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_HPP #define ASIO_IP_IMPL_ADDRESS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { #if !defined(ASIO_NO_DEPRECATED) inline address address::from_string(const char* str) { return asio::ip::make_address(str); } inline address address::from_string( const char* str, asio::error_code& ec) { return asio::ip::make_address(str, ec); } inline address address::from_string(const std::string& str) { return asio::ip::make_address(str); } inline address address::from_string( const std::string& str, asio::error_code& ec) { return asio::ip::make_address(str, ec); } #endif // !defined(ASIO_NO_DEPRECATED) template std::basic_ostream& operator<<( std::basic_ostream& os, const address& addr) { return os << addr.to_string().c_str(); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_HPP galera-4-26.4.25/asio/asio/ip/impl/address_v4.hpp000644 000164 177776 00000003016 15107057155 022522 0ustar00jenkinsnogroup000000 000000 // // ip/impl/address_v4.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_IMPL_ADDRESS_V4_HPP #define ASIO_IP_IMPL_ADDRESS_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #if !defined(ASIO_NO_IOSTREAM) #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { #if !defined(ASIO_NO_DEPRECATED) inline address_v4 address_v4::from_string(const char* str) { return asio::ip::make_address_v4(str); } inline address_v4 address_v4::from_string( const char* str, asio::error_code& ec) { return asio::ip::make_address_v4(str, ec); } inline address_v4 address_v4::from_string(const std::string& str) { return asio::ip::make_address_v4(str); } inline address_v4 address_v4::from_string( const std::string& str, asio::error_code& ec) { return asio::ip::make_address_v4(str, ec); } #endif // !defined(ASIO_NO_DEPRECATED) template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v4& addr) { return os << addr.to_string().c_str(); } } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_IP_IMPL_ADDRESS_V4_HPP galera-4-26.4.25/asio/asio/ip/bad_address_cast.hpp000644 000164 177776 00000002310 15107057155 022764 0ustar00jenkinsnogroup000000 000000 // // ip/bad_address_cast.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BAD_ADDRESS_CAST_HPP #define ASIO_IP_BAD_ADDRESS_CAST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Thrown to indicate a failed address conversion. class bad_address_cast : #if defined(ASIO_MSVC) && defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS public std::exception #else public std::bad_cast #endif { public: /// Default constructor. bad_address_cast() {} /// Destructor. virtual ~bad_address_cast() ASIO_NOEXCEPT_OR_NOTHROW {} /// Get the message associated with the exception. virtual const char* what() const ASIO_NOEXCEPT_OR_NOTHROW { return "bad address cast"; } }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ADDRESS_HPP galera-4-26.4.25/asio/asio/ip/basic_resolver_entry.hpp000644 000164 177776 00000005550 15107057155 023753 0ustar00jenkinsnogroup000000 000000 // // ip/basic_resolver_entry.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_ENTRY_HPP #define ASIO_IP_BASIC_RESOLVER_ENTRY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/string_view.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An entry produced by a resolver. /** * The asio::ip::basic_resolver_entry class template describes an entry * as returned by a resolver. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
*/ template class basic_resolver_entry { public: /// The protocol type associated with the endpoint entry. typedef InternetProtocol protocol_type; /// The endpoint type associated with the endpoint entry. typedef typename InternetProtocol::endpoint endpoint_type; /// Default constructor. basic_resolver_entry() { } /// Construct with specified endpoint, host name and service name. basic_resolver_entry(const endpoint_type& ep, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service) : endpoint_(ep), host_name_(static_cast(host)), service_name_(static_cast(service)) { } /// Get the endpoint associated with the entry. endpoint_type endpoint() const { return endpoint_; } /// Convert to the endpoint associated with the entry. operator endpoint_type() const { return endpoint_; } /// Get the host name associated with the entry. std::string host_name() const { return host_name_; } /// Get the host name associated with the entry. template std::basic_string, Allocator> host_name( const Allocator& alloc = Allocator()) const { return std::basic_string, Allocator>( host_name_.c_str(), alloc); } /// Get the service name associated with the entry. std::string service_name() const { return service_name_; } /// Get the service name associated with the entry. template std::basic_string, Allocator> service_name( const Allocator& alloc = Allocator()) const { return std::basic_string, Allocator>( service_name_.c_str(), alloc); } private: endpoint_type endpoint_; std::string host_name_; std::string service_name_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_ENTRY_HPP galera-4-26.4.25/asio/asio/ip/v6_only.hpp000644 000164 177776 00000003210 15107057155 021113 0ustar00jenkinsnogroup000000 000000 // // ip/v6_only.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_V6_ONLY_HPP #define ASIO_IP_V6_ONLY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Socket option for determining whether an IPv6 socket supports IPv6 /// communication only. /** * Implements the IPPROTO_IPV6/IP_V6ONLY socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::v6_only option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::v6_only option; * socket.get_option(option); * bool v6_only = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined v6_only; #elif defined(IPV6_V6ONLY) typedef asio::detail::socket_option::boolean< IPPROTO_IPV6, IPV6_V6ONLY> v6_only; #else typedef asio::detail::socket_option::boolean< asio::detail::custom_socket_option_level, asio::detail::always_fail_option> v6_only; #endif } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_V6_ONLY_HPP galera-4-26.4.25/asio/asio/ip/tcp.hpp000644 000164 177776 00000007005 15107057155 020313 0ustar00jenkinsnogroup000000 000000 // // ip/tcp.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_TCP_HPP #define ASIO_IP_TCP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for TCP. /** * The asio::ip::tcp class contains flags necessary for TCP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class tcp { public: /// The type of a TCP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 TCP protocol. static tcp v4() { return tcp(ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 TCP protocol. static tcp v6() { return tcp(ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_STREAM); } /// Obtain an identifier for the protocol. int protocol() const { return ASIO_OS_DEF(IPPROTO_TCP); } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// The TCP socket type. typedef basic_stream_socket socket; /// The TCP acceptor type. typedef basic_socket_acceptor acceptor; /// The TCP resolver type. typedef basic_resolver resolver; #if !defined(ASIO_NO_IOSTREAM) /// The TCP iostream type. typedef basic_socket_iostream iostream; #endif // !defined(ASIO_NO_IOSTREAM) /// Socket option for disabling the Nagle algorithm. 
/** * Implements the IPPROTO_TCP/TCP_NODELAY socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::no_delay option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::ip::tcp::no_delay option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined no_delay; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(IPPROTO_TCP), ASIO_OS_DEF(TCP_NODELAY)> no_delay; #endif /// Compare two protocols for equality. friend bool operator==(const tcp& p1, const tcp& p2) { return p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const tcp& p1, const tcp& p2) { return p1.family_ != p2.family_; } private: // Construct with a specific family. explicit tcp(int protocol_family) : family_(protocol_family) { } int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_TCP_HPP galera-4-26.4.25/asio/asio/ip/icmp.hpp000644 000164 177776 00000005045 15107057155 020457 0ustar00jenkinsnogroup000000 000000 // // ip/icmp.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ICMP_HPP #define ASIO_IP_ICMP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_types.hpp" #include "asio/basic_raw_socket.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for ICMP. /** * The asio::ip::icmp class contains flags necessary for ICMP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class icmp { public: /// The type of a ICMP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 ICMP protocol. static icmp v4() { return icmp(ASIO_OS_DEF(IPPROTO_ICMP), ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 ICMP protocol. static icmp v6() { return icmp(ASIO_OS_DEF(IPPROTO_ICMPV6), ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. int type() const { return ASIO_OS_DEF(SOCK_RAW); } /// Obtain an identifier for the protocol. int protocol() const { return protocol_; } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// The ICMP socket type. typedef basic_raw_socket socket; /// The ICMP resolver type. typedef basic_resolver resolver; /// Compare two protocols for equality. friend bool operator==(const icmp& p1, const icmp& p2) { return p1.protocol_ == p2.protocol_ && p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const icmp& p1, const icmp& p2) { return p1.protocol_ != p2.protocol_ || p1.family_ != p2.family_; } private: // Construct with a specific family. 
explicit icmp(int protocol_id, int protocol_family) : protocol_(protocol_id), family_(protocol_family) { } int protocol_; int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ICMP_HPP galera-4-26.4.25/asio/asio/ip/address_v6.hpp000644 000164 177776 00000023210 15107057155 021561 0ustar00jenkinsnogroup000000 000000 // // ip/address_v6.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V6_HPP #define ASIO_IP_ADDRESS_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/array.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #include "asio/ip/address_v4.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template class basic_address_iterator; /// Implements IP version 6 style addresses. /** * The asio::ip::address_v6 class provides the ability to use and * manipulate IP version 6 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address_v6 { public: /// The type used to represent an address as an array of bytes. /** * @note This type is defined in terms of the C++0x template @c std::array * when it is available. Otherwise, it uses @c boost:array. */ #if defined(GENERATING_DOCUMENTATION) typedef array bytes_type; #else typedef asio::detail::array bytes_type; #endif /// Default constructor. ASIO_DECL address_v6() ASIO_NOEXCEPT; /// Construct an address from raw bytes and scope ID. 
ASIO_DECL explicit address_v6(const bytes_type& bytes, unsigned long scope_id = 0); /// Copy constructor. ASIO_DECL address_v6(const address_v6& other) ASIO_NOEXCEPT; #if defined(ASIO_HAS_MOVE) /// Move constructor. ASIO_DECL address_v6(address_v6&& other) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. ASIO_DECL address_v6& operator=( const address_v6& other) ASIO_NOEXCEPT; #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. ASIO_DECL address_v6& operator=(address_v6&& other) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_MOVE) /// The scope ID of the address. /** * Returns the scope ID associated with the IPv6 address. */ unsigned long scope_id() const ASIO_NOEXCEPT { return scope_id_; } /// The scope ID of the address. /** * Modifies the scope ID associated with the IPv6 address. */ void scope_id(unsigned long id) ASIO_NOEXCEPT { scope_id_ = id; } /// Get the address in bytes, in network byte order. ASIO_DECL bytes_type to_bytes() const ASIO_NOEXCEPT; /// Get the address as a string. ASIO_DECL std::string to_string() const; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use other overload.) Get the address as a string. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP /// address string. static address_v6 from_string(const char* str); /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP /// address string. static address_v6 from_string( const char* str, asio::error_code& ec); /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP /// address string. static address_v6 from_string(const std::string& str); /// (Deprecated: Use make_address_v6().) Create an IPv6 address from an IP /// address string. static address_v6 from_string( const std::string& str, asio::error_code& ec); /// (Deprecated: Use make_address_v4().) Converts an IPv4-mapped or /// IPv4-compatible address to an IPv4 address. 
ASIO_DECL address_v4 to_v4() const; #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT; /// Determine whether the address is link local. ASIO_DECL bool is_link_local() const ASIO_NOEXCEPT; /// Determine whether the address is site local. ASIO_DECL bool is_site_local() const ASIO_NOEXCEPT; /// Determine whether the address is a mapped IPv4 address. ASIO_DECL bool is_v4_mapped() const ASIO_NOEXCEPT; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: No replacement.) Determine whether the address is an /// IPv4-compatible address. ASIO_DECL bool is_v4_compatible() const; #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the address is a multicast address. ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT; /// Determine whether the address is a global multicast address. ASIO_DECL bool is_multicast_global() const ASIO_NOEXCEPT; /// Determine whether the address is a link-local multicast address. ASIO_DECL bool is_multicast_link_local() const ASIO_NOEXCEPT; /// Determine whether the address is a node-local multicast address. ASIO_DECL bool is_multicast_node_local() const ASIO_NOEXCEPT; /// Determine whether the address is a org-local multicast address. ASIO_DECL bool is_multicast_org_local() const ASIO_NOEXCEPT; /// Determine whether the address is a site-local multicast address. ASIO_DECL bool is_multicast_site_local() const ASIO_NOEXCEPT; /// Compare two addresses for equality. ASIO_DECL friend bool operator==(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT; /// Compare two addresses for inequality. friend bool operator!=(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { return !(a1 == a2); } /// Compare addresses for ordering. 
ASIO_DECL friend bool operator<(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT; /// Compare addresses for ordering. friend bool operator>(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { return a2 < a1; } /// Compare addresses for ordering. friend bool operator<=(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { return !(a2 < a1); } /// Compare addresses for ordering. friend bool operator>=(const address_v6& a1, const address_v6& a2) ASIO_NOEXCEPT { return !(a1 < a2); } /// Obtain an address object that represents any address. static address_v6 any() ASIO_NOEXCEPT { return address_v6(); } /// Obtain an address object that represents the loopback address. ASIO_DECL static address_v6 loopback() ASIO_NOEXCEPT; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use make_address_v6().) Create an IPv4-mapped IPv6 address. ASIO_DECL static address_v6 v4_mapped(const address_v4& addr); /// (Deprecated: No replacement.) Create an IPv4-compatible IPv6 address. ASIO_DECL static address_v6 v4_compatible(const address_v4& addr); #endif // !defined(ASIO_NO_DEPRECATED) private: friend class basic_address_iterator; // The underlying IPv6 address. asio::detail::in6_addr_type addr_; // The scope ID associated with the address. unsigned long scope_id_; }; /// Create an IPv6 address from raw bytes and scope ID. /** * @relates address_v6 */ inline address_v6 make_address_v6(const address_v6::bytes_type& bytes, unsigned long scope_id = 0) { return address_v6(bytes, scope_id); } /// Create an IPv6 address from an IP address string. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(const char* str); /// Create an IPv6 address from an IP address string. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(const char* str, asio::error_code& ec) ASIO_NOEXCEPT; /// Createan IPv6 address from an IP address string. 
/** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(const std::string& str); /// Create an IPv6 address from an IP address string. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT; #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create an IPv6 address from an IP address string. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(string_view str); /// Create an IPv6 address from an IP address string. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6(string_view str, asio::error_code& ec) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) /// Tag type used for distinguishing overloads that deal in IPv4-mapped IPv6 /// addresses. enum v4_mapped_t { v4_mapped }; /// Create an IPv4 address from a IPv4-mapped IPv6 address. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4( v4_mapped_t, const address_v6& v6_addr); /// Create an IPv4-mapped IPv6 address from an IPv4 address. /** * @relates address_v6 */ ASIO_DECL address_v6 make_address_v6( v4_mapped_t, const address_v4& v4_addr); #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. * * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. 
* * @relates asio::ip::address_v6 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v6& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address_v6.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address_v6.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_V6_HPP galera-4-26.4.25/asio/asio/ip/udp.hpp000644 000164 177776 00000004536 15107057155 020323 0ustar00jenkinsnogroup000000 000000 // // ip/udp.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_UDP_HPP #define ASIO_IP_UDP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_datagram_socket.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Encapsulates the flags needed for UDP. /** * The asio::ip::udp class contains flags necessary for UDP sockets. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Safe. * * @par Concepts: * Protocol, InternetProtocol. */ class udp { public: /// The type of a UDP endpoint. typedef basic_endpoint endpoint; /// Construct to represent the IPv4 UDP protocol. static udp v4() { return udp(ASIO_OS_DEF(AF_INET)); } /// Construct to represent the IPv6 UDP protocol. static udp v6() { return udp(ASIO_OS_DEF(AF_INET6)); } /// Obtain an identifier for the type of the protocol. 
int type() const { return ASIO_OS_DEF(SOCK_DGRAM); } /// Obtain an identifier for the protocol. int protocol() const { return ASIO_OS_DEF(IPPROTO_UDP); } /// Obtain an identifier for the protocol family. int family() const { return family_; } /// The UDP socket type. typedef basic_datagram_socket socket; /// The UDP resolver type. typedef basic_resolver resolver; /// Compare two protocols for equality. friend bool operator==(const udp& p1, const udp& p2) { return p1.family_ == p2.family_; } /// Compare two protocols for inequality. friend bool operator!=(const udp& p1, const udp& p2) { return p1.family_ != p2.family_; } private: // Construct with a specific family. explicit udp(int protocol_family) : family_(protocol_family) { } int family_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_UDP_HPP galera-4-26.4.25/asio/asio/ip/address_v6_iterator.hpp000644 000164 177776 00000010511 15107057155 023472 0ustar00jenkinsnogroup000000 000000 // // ip/address_v6_iterator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V6_ITERATOR_HPP #define ASIO_IP_ADDRESS_V6_ITERATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address_v6.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template class basic_address_iterator; /// An input iterator that can be used for traversing IPv6 addresses. /** * In addition to satisfying the input iterator requirements, this iterator * also supports decrement. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
*/ template <> class basic_address_iterator { public: /// The type of the elements pointed to by the iterator. typedef address_v6 value_type; /// Distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of a pointer to an element pointed to by the iterator. typedef const address_v6* pointer; /// The type of a reference to an element pointed to by the iterator. typedef const address_v6& reference; /// Denotes that the iterator satisfies the input iterator requirements. typedef std::input_iterator_tag iterator_category; /// Construct an iterator that points to the specified address. basic_address_iterator(const address_v6& addr) ASIO_NOEXCEPT : address_(addr) { } /// Copy constructor. basic_address_iterator( const basic_address_iterator& other) ASIO_NOEXCEPT : address_(other.address_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_address_iterator(basic_address_iterator&& other) ASIO_NOEXCEPT : address_(ASIO_MOVE_CAST(address_v6)(other.address_)) { } #endif // defined(ASIO_HAS_MOVE) /// Assignment operator. basic_address_iterator& operator=( const basic_address_iterator& other) ASIO_NOEXCEPT { address_ = other.address_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move assignment operator. basic_address_iterator& operator=( basic_address_iterator&& other) ASIO_NOEXCEPT { address_ = ASIO_MOVE_CAST(address_v6)(other.address_); return *this; } #endif // defined(ASIO_HAS_MOVE) /// Dereference the iterator. const address_v6& operator*() const ASIO_NOEXCEPT { return address_; } /// Dereference the iterator. const address_v6* operator->() const ASIO_NOEXCEPT { return &address_; } /// Pre-increment operator. basic_address_iterator& operator++() ASIO_NOEXCEPT { for (int i = 15; i >= 0; --i) { if (address_.addr_.s6_addr[i] < 0xFF) { ++address_.addr_.s6_addr[i]; break; } address_.addr_.s6_addr[i] = 0; } return *this; } /// Post-increment operator. 
basic_address_iterator operator++(int) ASIO_NOEXCEPT { basic_address_iterator tmp(*this); ++*this; return tmp; } /// Pre-decrement operator. basic_address_iterator& operator--() ASIO_NOEXCEPT { for (int i = 15; i >= 0; --i) { if (address_.addr_.s6_addr[i] > 0) { --address_.addr_.s6_addr[i]; break; } address_.addr_.s6_addr[i] = 0xFF; } return *this; } /// Post-decrement operator. basic_address_iterator operator--(int) { basic_address_iterator tmp(*this); --*this; return tmp; } /// Compare two addresses for equality. friend bool operator==(const basic_address_iterator& a, const basic_address_iterator& b) { return a.address_ == b.address_; } /// Compare two addresses for inequality. friend bool operator!=(const basic_address_iterator& a, const basic_address_iterator& b) { return a.address_ != b.address_; } private: address_v6 address_; }; /// An input iterator that can be used for traversing IPv6 addresses. typedef basic_address_iterator address_v6_iterator; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ADDRESS_V6_ITERATOR_HPP galera-4-26.4.25/asio/asio/ip/basic_resolver_results.hpp000644 000164 177776 00000022413 15107057155 024310 0ustar00jenkinsnogroup000000 000000 // // ip/basic_resolver_results.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_RESULTS_HPP #define ASIO_IP_BASIC_RESOLVER_RESULTS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_utils.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// A range of entries produced by a resolver. /** * The asio::ip::basic_resolver_results class template is used to define * a range over the results returned by a resolver. * * The iterator's value_type, obtained when a results iterator is dereferenced, * is: @code const basic_resolver_entry @endcode * * @note For backward compatibility, basic_resolver_results is derived from * basic_resolver_iterator. This derivation is deprecated. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_results #if !defined(ASIO_NO_DEPRECATED) : public basic_resolver_iterator #else // !defined(ASIO_NO_DEPRECATED) : private basic_resolver_iterator #endif // !defined(ASIO_NO_DEPRECATED) { public: /// The protocol type associated with the results. typedef InternetProtocol protocol_type; /// The endpoint type associated with the results. typedef typename protocol_type::endpoint endpoint_type; /// The type of a value in the results range. typedef basic_resolver_entry value_type; /// The type of a const reference to a value in the range. typedef const value_type& const_reference; /// The type of a non-const reference to a value in the range. typedef value_type& reference; /// The type of an iterator into the range. 
typedef basic_resolver_iterator const_iterator; /// The type of an iterator into the range. typedef const_iterator iterator; /// Type used to represent the distance between two iterators in the range. typedef std::ptrdiff_t difference_type; /// Type used to represent a count of the elements in the range. typedef std::size_t size_type; /// Default constructor creates an empty range. basic_resolver_results() { } /// Copy constructor. basic_resolver_results(const basic_resolver_results& other) : basic_resolver_iterator(other) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. basic_resolver_results(basic_resolver_results&& other) : basic_resolver_iterator( ASIO_MOVE_CAST(basic_resolver_results)(other)) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Assignment operator. basic_resolver_results& operator=(const basic_resolver_results& other) { basic_resolver_iterator::operator=(other); return *this; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-assignment operator. basic_resolver_results& operator=(basic_resolver_results&& other) { basic_resolver_iterator::operator=( ASIO_MOVE_CAST(basic_resolver_results)(other)); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) #if !defined(GENERATING_DOCUMENTATION) // Create results from an addrinfo list returned by getaddrinfo. static basic_resolver_results create( asio::detail::addrinfo_type* address_info, const std::string& host_name, const std::string& service_name) { basic_resolver_results results; if (!address_info) return results; std::string actual_host_name = host_name; if (address_info->ai_canonname) actual_host_name = address_info->ai_canonname; results.values_.reset(new values_type); while (address_info) { if (address_info->ai_family == ASIO_OS_DEF(AF_INET) || address_info->ai_family == ASIO_OS_DEF(AF_INET6)) { using namespace std; // For memcpy. 
typename InternetProtocol::endpoint endpoint; endpoint.resize(static_cast(address_info->ai_addrlen)); memcpy(endpoint.data(), address_info->ai_addr, address_info->ai_addrlen); results.values_->push_back( basic_resolver_entry(endpoint, actual_host_name, service_name)); } address_info = address_info->ai_next; } return results; } // Create results from an endpoint, host name and service name. static basic_resolver_results create(const endpoint_type& endpoint, const std::string& host_name, const std::string& service_name) { basic_resolver_results results; results.values_.reset(new values_type); results.values_->push_back( basic_resolver_entry( endpoint, host_name, service_name)); return results; } // Create results from a sequence of endpoints, host and service name. template static basic_resolver_results create( EndpointIterator begin, EndpointIterator end, const std::string& host_name, const std::string& service_name) { basic_resolver_results results; if (begin != end) { results.values_.reset(new values_type); for (EndpointIterator ep_iter = begin; ep_iter != end; ++ep_iter) { results.values_->push_back( basic_resolver_entry( *ep_iter, host_name, service_name)); } } return results; } # if defined(ASIO_WINDOWS_RUNTIME) // Create results from a Windows Runtime list of EndpointPair objects. 
static basic_resolver_results create( Windows::Foundation::Collections::IVectorView< Windows::Networking::EndpointPair^>^ endpoints, const asio::detail::addrinfo_type& hints, const std::string& host_name, const std::string& service_name) { basic_resolver_results results; if (endpoints->Size) { results.values_.reset(new values_type); for (unsigned int i = 0; i < endpoints->Size; ++i) { auto pair = endpoints->GetAt(i); if (hints.ai_family == ASIO_OS_DEF(AF_INET) && pair->RemoteHostName->Type != Windows::Networking::HostNameType::Ipv4) continue; if (hints.ai_family == ASIO_OS_DEF(AF_INET6) && pair->RemoteHostName->Type != Windows::Networking::HostNameType::Ipv6) continue; results.values_->push_back( basic_resolver_entry( typename InternetProtocol::endpoint( ip::make_address( asio::detail::winrt_utils::string( pair->RemoteHostName->CanonicalName)), asio::detail::winrt_utils::integer( pair->RemoteServiceName)), host_name, service_name)); } } return results; } # endif // defined(ASIO_WINDOWS_RUNTIME) #endif // !defined(GENERATING_DOCUMENTATION) /// Get the number of entries in the results range. size_type size() const ASIO_NOEXCEPT { return this->values_ ? this->values_->size() : 0; } /// Get the maximum number of entries permitted in a results range. size_type max_size() const ASIO_NOEXCEPT { return this->values_ ? this->values_->max_size() : values_type().max_size(); } /// Determine whether the results range is empty. bool empty() const ASIO_NOEXCEPT { return this->values_ ? this->values_->empty() : true; } /// Obtain a begin iterator for the results range. const_iterator begin() const { basic_resolver_results tmp(*this); tmp.index_ = 0; return ASIO_MOVE_CAST(basic_resolver_results)(tmp); } /// Obtain an end iterator for the results range. const_iterator end() const { return const_iterator(); } /// Obtain a begin iterator for the results range. const_iterator cbegin() const { return begin(); } /// Obtain an end iterator for the results range. 
const_iterator cend() const { return end(); } /// Swap the results range with another. void swap(basic_resolver_results& that) ASIO_NOEXCEPT { if (this != &that) { this->values_.swap(that.values_); std::size_t index = this->index_; this->index_ = that.index_; that.index_ = index; } } /// Test two iterators for equality. friend bool operator==(const basic_resolver_results& a, const basic_resolver_results& b) { return a.equal(b); } /// Test two iterators for inequality. friend bool operator!=(const basic_resolver_results& a, const basic_resolver_results& b) { return !a.equal(b); } private: typedef std::vector > values_type; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_RESULTS_HPP galera-4-26.4.25/asio/asio/ip/network_v6.hpp000644 000164 177776 00000013765 15107057155 021643 0ustar00jenkinsnogroup000000 000000 // // ip/network_v6.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_NETWORK_V6_HPP #define ASIO_IP_NETWORK_V6_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/string_view.hpp" #include "asio/error_code.hpp" #include "asio/ip/address_v6_range.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Represents an IPv6 network. /** * The asio::ip::network_v6 class provides the ability to use and * manipulate IP version 6 networks. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class network_v6 { public: /// Default constructor. 
network_v6() ASIO_NOEXCEPT : address_(), prefix_length_(0) { } /// Construct a network based on the specified address and prefix length. ASIO_DECL network_v6(const address_v6& addr, unsigned short prefix_len); /// Copy constructor. network_v6(const network_v6& other) ASIO_NOEXCEPT : address_(other.address_), prefix_length_(other.prefix_length_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. network_v6(network_v6&& other) ASIO_NOEXCEPT : address_(ASIO_MOVE_CAST(address_v6)(other.address_)), prefix_length_(other.prefix_length_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another network. network_v6& operator=(const network_v6& other) ASIO_NOEXCEPT { address_ = other.address_; prefix_length_ = other.prefix_length_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another network. network_v6& operator=(network_v6&& other) ASIO_NOEXCEPT { address_ = ASIO_MOVE_CAST(address_v6)(other.address_); prefix_length_ = other.prefix_length_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// Obtain the address object specified when the network object was created. address_v6 address() const ASIO_NOEXCEPT { return address_; } /// Obtain the prefix length that was specified when the network object was /// created. unsigned short prefix_length() const ASIO_NOEXCEPT { return prefix_length_; } /// Obtain an address object that represents the network address. ASIO_DECL address_v6 network() const ASIO_NOEXCEPT; /// Obtain an address range corresponding to the hosts in the network. ASIO_DECL address_v6_range hosts() const ASIO_NOEXCEPT; /// Obtain the true network address, omitting any host bits. network_v6 canonical() const ASIO_NOEXCEPT { return network_v6(network(), prefix_length()); } /// Test if network is a valid host address. bool is_host() const ASIO_NOEXCEPT { return prefix_length_ == 128; } /// Test if a network is a real subnet of another network. 
ASIO_DECL bool is_subnet_of(const network_v6& other) const; /// Get the network as an address in dotted decimal format. ASIO_DECL std::string to_string() const; /// Get the network as an address in dotted decimal format. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// Compare two networks for equality. friend bool operator==(const network_v6& a, const network_v6& b) { return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_; } /// Compare two networks for inequality. friend bool operator!=(const network_v6& a, const network_v6& b) { return !(a == b); } private: address_v6 address_; unsigned short prefix_length_; }; /// Create an IPv6 network from an address and prefix length. /** * @relates address_v6 */ inline network_v6 make_network_v6( const address_v6& addr, unsigned short prefix_len) { return network_v6(addr, prefix_len); } /// Create an IPv6 network from a string containing IP address and prefix /// length. /** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6(const char* str); /// Create an IPv6 network from a string containing IP address and prefix /// length. /** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6( const char* str, asio::error_code& ec); /// Create an IPv6 network from a string containing IP address and prefix /// length. /** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6(const std::string& str); /// Create an IPv6 network from a string containing IP address and prefix /// length. /** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6( const std::string& str, asio::error_code& ec); #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create an IPv6 network from a string containing IP address and prefix /// length. /** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6(string_view str); /// Create an IPv6 network from a string containing IP address and prefix /// length. 
/** * @relates network_v6 */ ASIO_DECL network_v6 make_network_v6( string_view str, asio::error_code& ec); #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) #if !defined(ASIO_NO_IOSTREAM) /// Output a network as a string. /** * Used to output a human-readable string for a specified network. * * @param os The output stream to which the string will be written. * * @param net The network to be written. * * @return The output stream. * * @relates asio::ip::address_v6 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const network_v6& net); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/network_v6.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/network_v6.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_NETWORK_V6_HPP galera-4-26.4.25/asio/asio/ip/network_v4.hpp000644 000164 177776 00000015362 15107057155 021634 0ustar00jenkinsnogroup000000 000000 // // ip/network_v4.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2014 Oliver Kowalke (oliver dot kowalke at gmail dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_NETWORK_V4_HPP #define ASIO_IP_NETWORK_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/string_view.hpp" #include "asio/error_code.hpp" #include "asio/ip/address_v4_range.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Represents an IPv4 network. /** * The asio::ip::network_v4 class provides the ability to use and * manipulate IP version 4 networks. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
*/ class network_v4 { public: /// Default constructor. network_v4() ASIO_NOEXCEPT : address_(), prefix_length_(0) { } /// Construct a network based on the specified address and prefix length. ASIO_DECL network_v4(const address_v4& addr, unsigned short prefix_len); /// Construct network based on the specified address and netmask. ASIO_DECL network_v4(const address_v4& addr, const address_v4& mask); /// Copy constructor. network_v4(const network_v4& other) ASIO_NOEXCEPT : address_(other.address_), prefix_length_(other.prefix_length_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. network_v4(network_v4&& other) ASIO_NOEXCEPT : address_(ASIO_MOVE_CAST(address_v4)(other.address_)), prefix_length_(other.prefix_length_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another network. network_v4& operator=(const network_v4& other) ASIO_NOEXCEPT { address_ = other.address_; prefix_length_ = other.prefix_length_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another network. network_v4& operator=(network_v4&& other) ASIO_NOEXCEPT { address_ = ASIO_MOVE_CAST(address_v4)(other.address_); prefix_length_ = other.prefix_length_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// Obtain the address object specified when the network object was created. address_v4 address() const ASIO_NOEXCEPT { return address_; } /// Obtain the prefix length that was specified when the network object was /// created. unsigned short prefix_length() const ASIO_NOEXCEPT { return prefix_length_; } /// Obtain the netmask that was specified when the network object was created. ASIO_DECL address_v4 netmask() const ASIO_NOEXCEPT; /// Obtain an address object that represents the network address. address_v4 network() const ASIO_NOEXCEPT { return address_v4(address_.to_uint() & netmask().to_uint()); } /// Obtain an address object that represents the network's broadcast address. 
address_v4 broadcast() const ASIO_NOEXCEPT { return address_v4(network().to_uint() | (netmask().to_uint() ^ 0xFFFFFFFF)); } /// Obtain an address range corresponding to the hosts in the network. ASIO_DECL address_v4_range hosts() const ASIO_NOEXCEPT; /// Obtain the true network address, omitting any host bits. network_v4 canonical() const ASIO_NOEXCEPT { return network_v4(network(), netmask()); } /// Test if network is a valid host address. bool is_host() const ASIO_NOEXCEPT { return prefix_length_ == 32; } /// Test if a network is a real subnet of another network. ASIO_DECL bool is_subnet_of(const network_v4& other) const; /// Get the network as an address in dotted decimal format. ASIO_DECL std::string to_string() const; /// Get the network as an address in dotted decimal format. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// Compare two networks for equality. friend bool operator==(const network_v4& a, const network_v4& b) { return a.address_ == b.address_ && a.prefix_length_ == b.prefix_length_; } /// Compare two networks for inequality. friend bool operator!=(const network_v4& a, const network_v4& b) { return !(a == b); } private: address_v4 address_; unsigned short prefix_length_; }; /// Create an IPv4 network from an address and prefix length. /** * @relates address_v4 */ inline network_v4 make_network_v4( const address_v4& addr, unsigned short prefix_len) { return network_v4(addr, prefix_len); } /// Create an IPv4 network from an address and netmask. /** * @relates address_v4 */ inline network_v4 make_network_v4( const address_v4& addr, const address_v4& mask) { return network_v4(addr, mask); } /// Create an IPv4 network from a string containing IP address and prefix /// length. /** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4(const char* str); /// Create an IPv4 network from a string containing IP address and prefix /// length. 
/** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4( const char* str, asio::error_code& ec); /// Create an IPv4 network from a string containing IP address and prefix /// length. /** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4(const std::string& str); /// Create an IPv4 network from a string containing IP address and prefix /// length. /** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4( const std::string& str, asio::error_code& ec); #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create an IPv4 network from a string containing IP address and prefix /// length. /** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4(string_view str); /// Create an IPv4 network from a string containing IP address and prefix /// length. /** * @relates network_v4 */ ASIO_DECL network_v4 make_network_v4( string_view str, asio::error_code& ec); #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) #if !defined(ASIO_NO_IOSTREAM) /// Output a network as a string. /** * Used to output a human-readable string for a specified network. * * @param os The output stream to which the string will be written. * * @param net The network to be written. * * @return The output stream. * * @relates asio::ip::address_v4 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const network_v4& net); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/network_v4.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/network_v4.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_NETWORK_V4_HPP galera-4-26.4.25/asio/asio/ip/host_name.hpp000644 000164 177776 00000001730 15107057155 021501 0ustar00jenkinsnogroup000000 000000 // // ip/host_name.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_HOST_NAME_HPP #define ASIO_IP_HOST_NAME_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Get the current host name. ASIO_DECL std::string host_name(); /// Get the current host name. ASIO_DECL std::string host_name(asio::error_code& ec); } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/host_name.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_HOST_NAME_HPP galera-4-26.4.25/asio/asio/ip/detail/000755 000164 177776 00000000000 15107057160 020250 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ip/detail/impl/000755 000164 177776 00000000000 15107057160 021211 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ip/detail/impl/endpoint.ipp000644 000164 177776 00000012504 15107057155 023551 0ustar00jenkinsnogroup000000 000000 // // ip/detail/impl/endpoint.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP #define ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/socket_ops.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/ip/detail/endpoint.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { endpoint::endpoint() ASIO_NOEXCEPT : data_() { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = 0; data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY); } endpoint::endpoint(int family, unsigned short port_num) ASIO_NOEXCEPT : data_() { using namespace std; // For memcpy. if (family == ASIO_OS_DEF(AF_INET)) { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v4.sin_addr.s_addr = ASIO_OS_DEF(INADDR_ANY); } else { data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6); data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v6.sin6_flowinfo = 0; data_.v6.sin6_addr.s6_addr[0] = 0; data_.v6.sin6_addr.s6_addr[1] = 0; data_.v6.sin6_addr.s6_addr[2] = 0; data_.v6.sin6_addr.s6_addr[3] = 0; data_.v6.sin6_addr.s6_addr[4] = 0; data_.v6.sin6_addr.s6_addr[5] = 0; data_.v6.sin6_addr.s6_addr[6] = 0; data_.v6.sin6_addr.s6_addr[7] = 0; data_.v6.sin6_addr.s6_addr[8] = 0; data_.v6.sin6_addr.s6_addr[9] = 0; data_.v6.sin6_addr.s6_addr[10] = 0; data_.v6.sin6_addr.s6_addr[11] = 0; data_.v6.sin6_addr.s6_addr[12] = 0; data_.v6.sin6_addr.s6_addr[13] = 0; data_.v6.sin6_addr.s6_addr[14] = 0; data_.v6.sin6_addr.s6_addr[15] = 0; data_.v6.sin6_scope_id = 0; } } endpoint::endpoint(const asio::ip::address& addr, unsigned short port_num) ASIO_NOEXCEPT : data_() { 
using namespace std; // For memcpy. if (addr.is_v4()) { data_.v4.sin_family = ASIO_OS_DEF(AF_INET); data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v4.sin_addr.s_addr = asio::detail::socket_ops::host_to_network_long( addr.to_v4().to_uint()); } else { data_.v6.sin6_family = ASIO_OS_DEF(AF_INET6); data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); data_.v6.sin6_flowinfo = 0; asio::ip::address_v6 v6_addr = addr.to_v6(); asio::ip::address_v6::bytes_type bytes = v6_addr.to_bytes(); memcpy(data_.v6.sin6_addr.s6_addr, bytes.data(), 16); data_.v6.sin6_scope_id = static_cast( v6_addr.scope_id()); } } void endpoint::resize(std::size_t new_size) { if (new_size > sizeof(asio::detail::sockaddr_storage_type)) { asio::error_code ec(asio::error::invalid_argument); asio::detail::throw_error(ec); } } unsigned short endpoint::port() const ASIO_NOEXCEPT { if (is_v4()) { return asio::detail::socket_ops::network_to_host_short( data_.v4.sin_port); } else { return asio::detail::socket_ops::network_to_host_short( data_.v6.sin6_port); } } void endpoint::port(unsigned short port_num) ASIO_NOEXCEPT { if (is_v4()) { data_.v4.sin_port = asio::detail::socket_ops::host_to_network_short(port_num); } else { data_.v6.sin6_port = asio::detail::socket_ops::host_to_network_short(port_num); } } asio::ip::address endpoint::address() const ASIO_NOEXCEPT { using namespace std; // For memcpy. 
if (is_v4()) { return asio::ip::address_v4( asio::detail::socket_ops::network_to_host_long( data_.v4.sin_addr.s_addr)); } else { asio::ip::address_v6::bytes_type bytes; #if defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.data(), data_.v6.sin6_addr.s6_addr, 16); #else // defined(ASIO_HAS_STD_ARRAY) memcpy(bytes.elems, data_.v6.sin6_addr.s6_addr, 16); #endif // defined(ASIO_HAS_STD_ARRAY) return asio::ip::address_v6(bytes, data_.v6.sin6_scope_id); } } void endpoint::address(const asio::ip::address& addr) ASIO_NOEXCEPT { endpoint tmp_endpoint(addr, port()); data_ = tmp_endpoint.data_; } bool operator==(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT { return e1.address() == e2.address() && e1.port() == e2.port(); } bool operator<(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT { if (e1.address() < e2.address()) return true; if (e1.address() != e2.address()) return false; return e1.port() < e2.port(); } #if !defined(ASIO_NO_IOSTREAM) std::string endpoint::to_string() const { std::ostringstream tmp_os; tmp_os.imbue(std::locale::classic()); if (is_v4()) tmp_os << address(); else tmp_os << '[' << address() << ']'; tmp_os << ':' << port(); return tmp_os.str(); } #endif // !defined(ASIO_NO_IOSTREAM) } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_DETAIL_IMPL_ENDPOINT_IPP galera-4-26.4.25/asio/asio/ip/detail/endpoint.hpp000644 000164 177776 00000007221 15107057155 022607 0ustar00jenkinsnogroup000000 000000 // // ip/detail/endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_ENDPOINT_HPP #define ASIO_IP_DETAIL_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_types.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #include "asio/ip/address.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { // Helper class for implementating an IP endpoint. class endpoint { public: // Default constructor. ASIO_DECL endpoint() ASIO_NOEXCEPT; // Construct an endpoint using a family and port number. ASIO_DECL endpoint(int family, unsigned short port_num) ASIO_NOEXCEPT; // Construct an endpoint using an address and port number. ASIO_DECL endpoint(const asio::ip::address& addr, unsigned short port_num) ASIO_NOEXCEPT; // Copy constructor. endpoint(const endpoint& other) ASIO_NOEXCEPT : data_(other.data_) { } // Assign from another endpoint. endpoint& operator=(const endpoint& other) ASIO_NOEXCEPT { data_ = other.data_; return *this; } // Get the underlying endpoint in the native type. asio::detail::socket_addr_type* data() ASIO_NOEXCEPT { return &data_.base; } // Get the underlying endpoint in the native type. const asio::detail::socket_addr_type* data() const ASIO_NOEXCEPT { return &data_.base; } // Get the underlying size of the endpoint in the native type. std::size_t size() const ASIO_NOEXCEPT { if (is_v4()) return sizeof(asio::detail::sockaddr_in4_type); else return sizeof(asio::detail::sockaddr_in6_type); } // Set the underlying size of the endpoint in the native type. ASIO_DECL void resize(std::size_t new_size); // Get the capacity of the endpoint in the native type. std::size_t capacity() const ASIO_NOEXCEPT { return sizeof(data_); } // Get the port associated with the endpoint. 
ASIO_DECL unsigned short port() const ASIO_NOEXCEPT; // Set the port associated with the endpoint. ASIO_DECL void port(unsigned short port_num) ASIO_NOEXCEPT; // Get the IP address associated with the endpoint. ASIO_DECL asio::ip::address address() const ASIO_NOEXCEPT; // Set the IP address associated with the endpoint. ASIO_DECL void address( const asio::ip::address& addr) ASIO_NOEXCEPT; // Compare two endpoints for equality. ASIO_DECL friend bool operator==(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT; // Compare endpoints for ordering. ASIO_DECL friend bool operator<(const endpoint& e1, const endpoint& e2) ASIO_NOEXCEPT; // Determine whether the endpoint is IPv4. bool is_v4() const ASIO_NOEXCEPT { return data_.base.sa_family == ASIO_OS_DEF(AF_INET); } #if !defined(ASIO_NO_IOSTREAM) // Convert to a string. ASIO_DECL std::string to_string() const; #endif // !defined(ASIO_NO_IOSTREAM) private: // The underlying IP socket address. union data_union { asio::detail::socket_addr_type base; asio::detail::sockaddr_in4_type v4; asio::detail::sockaddr_in6_type v6; } data_; }; } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/detail/impl/endpoint.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_DETAIL_ENDPOINT_HPP galera-4-26.4.25/asio/asio/ip/detail/socket_option.hpp000644 000164 177776 00000033236 15107057155 023654 0ustar00jenkinsnogroup000000 000000 // // detail/socket_option.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_DETAIL_SOCKET_OPTION_HPP #define ASIO_IP_DETAIL_SOCKET_OPTION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/ip/address.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace detail { namespace socket_option { // Helper template for implementing multicast enable loopback options. template class multicast_enable_loopback { public: #if defined(__sun) || defined(__osf__) typedef unsigned char ipv4_value_type; typedef unsigned char ipv6_value_type; #elif defined(_AIX) || defined(__hpux) || defined(__QNXNTO__) typedef unsigned char ipv4_value_type; typedef unsigned int ipv6_value_type; #else typedef int ipv4_value_type; typedef int ipv6_value_type; #endif // Default constructor. multicast_enable_loopback() : ipv4_value_(0), ipv6_value_(0) { } // Construct with a specific option value. explicit multicast_enable_loopback(bool v) : ipv4_value_(v ? 1 : 0), ipv6_value_(v ? 1 : 0) { } // Set the value of the boolean. multicast_enable_loopback& operator=(bool v) { ipv4_value_ = v ? 1 : 0; ipv6_value_ = v ? 1 : 0; return *this; } // Get the current value of the boolean. bool value() const { return !!ipv4_value_; } // Convert to bool. operator bool() const { return !!ipv4_value_; } // Test for false. bool operator!() const { return !ipv4_value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. 
template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the boolean data. template void* data(const Protocol& protocol) { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the address of the boolean data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the boolean data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } // Set the size of the boolean data. template void resize(const Protocol& protocol, std::size_t s) { if (protocol.family() == PF_INET6) { if (s != sizeof(ipv6_value_)) { std::length_error ex("multicast_enable_loopback socket option resize"); asio::detail::throw_exception(ex); } ipv4_value_ = ipv6_value_ ? 1 : 0; } else { if (s != sizeof(ipv4_value_)) { std::length_error ex("multicast_enable_loopback socket option resize"); asio::detail::throw_exception(ex); } ipv6_value_ = ipv4_value_ ? 1 : 0; } } private: ipv4_value_type ipv4_value_; ipv6_value_type ipv6_value_; }; // Helper template for implementing unicast hops options. template class unicast_hops { public: // Default constructor. unicast_hops() : value_(0) { } // Construct with a specific option value. explicit unicast_hops(int v) : value_(v) { } // Set the value of the option. unicast_hops& operator=(int v) { value_ = v; return *this; } // Get the current value of the option. int value() const { return value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. 
template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the data. template int* data(const Protocol&) { return &value_; } // Get the address of the data. template const int* data(const Protocol&) const { return &value_; } // Get the size of the data. template std::size_t size(const Protocol&) const { return sizeof(value_); } // Set the size of the data. template void resize(const Protocol&, std::size_t s) { if (s != sizeof(value_)) { std::length_error ex("unicast hops socket option resize"); asio::detail::throw_exception(ex); } #if defined(__hpux) if (value_ < 0) value_ = value_ & 0xFF; #endif } private: int value_; }; // Helper template for implementing multicast hops options. template class multicast_hops { public: #if defined(ASIO_WINDOWS) && defined(UNDER_CE) typedef int ipv4_value_type; #else typedef unsigned char ipv4_value_type; #endif typedef int ipv6_value_type; // Default constructor. multicast_hops() : ipv4_value_(0), ipv6_value_(0) { } // Construct with a specific option value. explicit multicast_hops(int v) { if (v < 0 || v > 255) { std::out_of_range ex("multicast hops value out of range"); asio::detail::throw_exception(ex); } ipv4_value_ = (ipv4_value_type)v; ipv6_value_ = v; } // Set the value of the option. multicast_hops& operator=(int v) { if (v < 0 || v > 255) { std::out_of_range ex("multicast hops value out of range"); asio::detail::throw_exception(ex); } ipv4_value_ = (ipv4_value_type)v; ipv6_value_ = v; return *this; } // Get the current value of the option. int value() const { return ipv6_value_; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the data. 
template void* data(const Protocol& protocol) { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the address of the data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } // Set the size of the data. template void resize(const Protocol& protocol, std::size_t s) { if (protocol.family() == PF_INET6) { if (s != sizeof(ipv6_value_)) { std::length_error ex("multicast hops socket option resize"); asio::detail::throw_exception(ex); } if (ipv6_value_ < 0) ipv4_value_ = 0; else if (ipv6_value_ > 255) ipv4_value_ = 255; else ipv4_value_ = (ipv4_value_type)ipv6_value_; } else { if (s != sizeof(ipv4_value_)) { std::length_error ex("multicast hops socket option resize"); asio::detail::throw_exception(ex); } ipv6_value_ = ipv4_value_; } } private: ipv4_value_type ipv4_value_; ipv6_value_type ipv6_value_; }; // Helper template for implementing ip_mreq-based options. template class multicast_request { public: // Default constructor. multicast_request() : ipv4_value_(), // Zero-initialisation gives the "any" address. ipv6_value_() // Zero-initialisation gives the "any" address. { } // Construct with multicast address only. explicit multicast_request(const address& multicast_address) : ipv4_value_(), // Zero-initialisation gives the "any" address. ipv6_value_() // Zero-initialisation gives the "any" address. { if (multicast_address.is_v6()) { using namespace std; // For memcpy. 
address_v6 ipv6_address = multicast_address.to_v6(); address_v6::bytes_type bytes = ipv6_address.to_bytes(); memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16); ipv6_value_.ipv6mr_interface = ipv6_address.scope_id(); } else { ipv4_value_.imr_multiaddr.s_addr = asio::detail::socket_ops::host_to_network_long( multicast_address.to_v4().to_uint()); ipv4_value_.imr_interface.s_addr = asio::detail::socket_ops::host_to_network_long( address_v4::any().to_uint()); } } // Construct with multicast address and IPv4 address specifying an interface. explicit multicast_request(const address_v4& multicast_address, const address_v4& network_interface = address_v4::any()) : ipv6_value_() // Zero-initialisation gives the "any" address. { ipv4_value_.imr_multiaddr.s_addr = asio::detail::socket_ops::host_to_network_long( multicast_address.to_uint()); ipv4_value_.imr_interface.s_addr = asio::detail::socket_ops::host_to_network_long( network_interface.to_uint()); } // Construct with multicast address and IPv6 network interface index. explicit multicast_request( const address_v6& multicast_address, unsigned long network_interface = 0) : ipv4_value_() // Zero-initialisation gives the "any" address. { using namespace std; // For memcpy. address_v6::bytes_type bytes = multicast_address.to_bytes(); memcpy(ipv6_value_.ipv6mr_multiaddr.s6_addr, bytes.data(), 16); if (network_interface) ipv6_value_.ipv6mr_interface = network_interface; else ipv6_value_.ipv6mr_interface = multicast_address.scope_id(); } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the option data. 
template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the option data. template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } private: asio::detail::in4_mreq_type ipv4_value_; asio::detail::in6_mreq_type ipv6_value_; }; // Helper template for implementing options that specify a network interface. template class network_interface { public: // Default constructor. network_interface() { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( address_v4::any().to_uint()); ipv6_value_ = 0; } // Construct with IPv4 interface. explicit network_interface(const address_v4& ipv4_interface) { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( ipv4_interface.to_uint()); ipv6_value_ = 0; } // Construct with IPv6 interface. explicit network_interface(unsigned int ipv6_interface) { ipv4_value_.s_addr = asio::detail::socket_ops::host_to_network_long( address_v4::any().to_uint()); ipv6_value_ = ipv6_interface; } // Get the level of the socket option. template int level(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Level; return IPv4_Level; } // Get the name of the socket option. template int name(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return IPv6_Name; return IPv4_Name; } // Get the address of the option data. template const void* data(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return &ipv6_value_; return &ipv4_value_; } // Get the size of the option data. 
template std::size_t size(const Protocol& protocol) const { if (protocol.family() == PF_INET6) return sizeof(ipv6_value_); return sizeof(ipv4_value_); } private: asio::detail::in4_addr_type ipv4_value_; unsigned int ipv6_value_; }; } // namespace socket_option } // namespace detail } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_DETAIL_SOCKET_OPTION_HPP galera-4-26.4.25/asio/asio/ip/basic_endpoint.hpp000644 000164 177776 00000015700 15107057155 022507 0ustar00jenkinsnogroup000000 000000 // // ip/basic_endpoint.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_ENDPOINT_HPP #define ASIO_IP_BASIC_ENDPOINT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address.hpp" #include "asio/ip/detail/endpoint.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Describes an endpoint for a version-independent IP socket. /** * The asio::ip::basic_endpoint class template describes an endpoint that * may be associated with a particular socket. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Concepts: * Endpoint. */ template class basic_endpoint { public: /// The protocol type associated with the endpoint. typedef InternetProtocol protocol_type; /// The type of the endpoint structure. This type is dependent on the /// underlying implementation of the socket layer. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined data_type; #else typedef asio::detail::socket_addr_type data_type; #endif /// Default constructor. 
basic_endpoint() ASIO_NOEXCEPT : impl_() { } /// Construct an endpoint using a port number, specified in the host's byte /// order. The IP address will be the any address (i.e. INADDR_ANY or /// in6addr_any). This constructor would typically be used for accepting new /// connections. /** * @par Examples * To initialise an IPv4 TCP endpoint for port 1234, use: * @code * asio::ip::tcp::endpoint ep(asio::ip::tcp::v4(), 1234); * @endcode * * To specify an IPv6 UDP endpoint for port 9876, use: * @code * asio::ip::udp::endpoint ep(asio::ip::udp::v6(), 9876); * @endcode */ basic_endpoint(const InternetProtocol& internet_protocol, unsigned short port_num) ASIO_NOEXCEPT : impl_(internet_protocol.family(), port_num) { } /// Construct an endpoint using a port number and an IP address. This /// constructor may be used for accepting connections on a specific interface /// or for making a connection to a remote endpoint. basic_endpoint(const asio::ip::address& addr, unsigned short port_num) ASIO_NOEXCEPT : impl_(addr, port_num) { } /// Copy constructor. basic_endpoint(const basic_endpoint& other) ASIO_NOEXCEPT : impl_(other.impl_) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. basic_endpoint(basic_endpoint&& other) ASIO_NOEXCEPT : impl_(other.impl_) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Assign from another endpoint. basic_endpoint& operator=(const basic_endpoint& other) ASIO_NOEXCEPT { impl_ = other.impl_; return *this; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-assign from another endpoint. basic_endpoint& operator=(basic_endpoint&& other) ASIO_NOEXCEPT { impl_ = other.impl_; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// The protocol associated with the endpoint. 
protocol_type protocol() const ASIO_NOEXCEPT { if (impl_.is_v4()) return InternetProtocol::v4(); return InternetProtocol::v6(); } /// Get the underlying endpoint in the native type. data_type* data() ASIO_NOEXCEPT { return impl_.data(); } /// Get the underlying endpoint in the native type. const data_type* data() const ASIO_NOEXCEPT { return impl_.data(); } /// Get the underlying size of the endpoint in the native type. std::size_t size() const ASIO_NOEXCEPT { return impl_.size(); } /// Set the underlying size of the endpoint in the native type. void resize(std::size_t new_size) { impl_.resize(new_size); } /// Get the capacity of the endpoint in the native type. std::size_t capacity() const ASIO_NOEXCEPT { return impl_.capacity(); } /// Get the port associated with the endpoint. The port number is always in /// the host's byte order. unsigned short port() const ASIO_NOEXCEPT { return impl_.port(); } /// Set the port associated with the endpoint. The port number is always in /// the host's byte order. void port(unsigned short port_num) ASIO_NOEXCEPT { impl_.port(port_num); } /// Get the IP address associated with the endpoint. asio::ip::address address() const ASIO_NOEXCEPT { return impl_.address(); } /// Set the IP address associated with the endpoint. void address(const asio::ip::address& addr) ASIO_NOEXCEPT { impl_.address(addr); } /// Compare two endpoints for equality. friend bool operator==(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return e1.impl_ == e2.impl_; } /// Compare two endpoints for inequality. friend bool operator!=(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return !(e1 == e2); } /// Compare endpoints for ordering. friend bool operator<(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return e1.impl_ < e2.impl_; } /// Compare endpoints for ordering. 
friend bool operator>(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return e2.impl_ < e1.impl_; } /// Compare endpoints for ordering. friend bool operator<=(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return !(e2 < e1); } /// Compare endpoints for ordering. friend bool operator>=(const basic_endpoint& e1, const basic_endpoint& e2) ASIO_NOEXCEPT { return !(e1 < e2); } private: // The underlying IP endpoint. asio::ip::detail::endpoint impl_; }; #if !defined(ASIO_NO_IOSTREAM) /// Output an endpoint as a string. /** * Used to output a human-readable string for a specified endpoint. * * @param os The output stream to which the string will be written. * * @param endpoint The endpoint to be written. * * @return The output stream. * * @relates asio::ip::basic_endpoint */ template std::basic_ostream& operator<<( std::basic_ostream& os, const basic_endpoint& endpoint); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/basic_endpoint.hpp" #endif // ASIO_IP_BASIC_ENDPOINT_HPP galera-4-26.4.25/asio/asio/ip/basic_resolver_iterator.hpp000644 000164 177776 00000011573 15107057155 024445 0ustar00jenkinsnogroup000000 000000 // // ip/basic_resolver_iterator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP #define ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include "asio/detail/memory.hpp" #include "asio/detail/socket_ops.hpp" #include "asio/detail/socket_types.hpp" #include "asio/ip/basic_resolver_entry.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_utils.hpp" #endif // defined(ASIO_WINDOWS_RUNTIME) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An iterator over the entries produced by a resolver. /** * The asio::ip::basic_resolver_iterator class template is used to define * iterators over the results returned by a resolver. * * The iterator's value_type, obtained when the iterator is dereferenced, is: * @code const basic_resolver_entry @endcode * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_iterator { public: /// The type used for the distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of the value pointed to by the iterator. typedef basic_resolver_entry value_type; /// The type of the result of applying operator->() to the iterator. typedef const basic_resolver_entry* pointer; /// The type of the result of applying operator*() to the iterator. typedef const basic_resolver_entry& reference; /// The iterator category. typedef std::forward_iterator_tag iterator_category; /// Default constructor creates an end iterator. basic_resolver_iterator() : index_(0) { } /// Copy constructor. basic_resolver_iterator(const basic_resolver_iterator& other) : values_(other.values_), index_(other.index_) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. 
basic_resolver_iterator(basic_resolver_iterator&& other) : values_(ASIO_MOVE_CAST(values_ptr_type)(other.values_)), index_(other.index_) { other.index_ = 0; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Assignment operator. basic_resolver_iterator& operator=(const basic_resolver_iterator& other) { values_ = other.values_; index_ = other.index_; return *this; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-assignment operator. basic_resolver_iterator& operator=(basic_resolver_iterator&& other) { if (this != &other) { values_ = ASIO_MOVE_CAST(values_ptr_type)(other.values_); index_ = other.index_; other.index_ = 0; } return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Dereference an iterator. const basic_resolver_entry& operator*() const { return dereference(); } /// Dereference an iterator. const basic_resolver_entry* operator->() const { return &dereference(); } /// Increment operator (prefix). basic_resolver_iterator& operator++() { increment(); return *this; } /// Increment operator (postfix). basic_resolver_iterator operator++(int) { basic_resolver_iterator tmp(*this); ++*this; return tmp; } /// Test two iterators for equality. friend bool operator==(const basic_resolver_iterator& a, const basic_resolver_iterator& b) { return a.equal(b); } /// Test two iterators for inequality. friend bool operator!=(const basic_resolver_iterator& a, const basic_resolver_iterator& b) { return !a.equal(b); } protected: void increment() { if (++index_ == values_->size()) { // Reset state to match a default constructed end iterator. 
values_.reset(); index_ = 0; } } bool equal(const basic_resolver_iterator& other) const { if (!values_ && !other.values_) return true; if (values_ != other.values_) return false; return index_ == other.index_; } const basic_resolver_entry& dereference() const { return (*values_)[index_]; } typedef std::vector > values_type; typedef asio::detail::shared_ptr values_ptr_type; values_ptr_type values_; std::size_t index_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_ITERATOR_HPP galera-4-26.4.25/asio/asio/ip/unicast.hpp000644 000164 177776 00000003155 15107057155 021175 0ustar00jenkinsnogroup000000 000000 // // ip/unicast.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_UNICAST_HPP #define ASIO_IP_UNICAST_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/ip/detail/socket_option.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { namespace unicast { /// Socket option for time-to-live associated with outgoing unicast packets. /** * Implements the IPPROTO_IP/IP_UNICAST_TTL socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::unicast::hops option(4); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::ip::unicast::hops option; * socket.get_option(option); * int ttl = option.value(); * @endcode * * @par Concepts: * GettableSocketOption, SettableSocketOption. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined hops; #else typedef asio::ip::detail::socket_option::unicast_hops< ASIO_OS_DEF(IPPROTO_IP), ASIO_OS_DEF(IP_TTL), ASIO_OS_DEF(IPPROTO_IPV6), ASIO_OS_DEF(IPV6_UNICAST_HOPS)> hops; #endif } // namespace unicast } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_UNICAST_HPP galera-4-26.4.25/asio/asio/ip/resolver_query_base.hpp000644 000164 177776 00000002100 15107057155 023574 0ustar00jenkinsnogroup000000 000000 // // ip/resolver_query_base.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_RESOLVER_QUERY_BASE_HPP #define ASIO_IP_RESOLVER_QUERY_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/resolver_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// The resolver_query_base class is used as a base for the /// basic_resolver_query class templates to provide a common place to define /// the flag constants. class resolver_query_base : public resolver_base { protected: /// Protected destructor to prevent deletion through this type. ~resolver_query_base() { } }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_RESOLVER_QUERY_BASE_HPP galera-4-26.4.25/asio/asio/ip/address_v4_iterator.hpp000644 000164 177776 00000007763 15107057155 023507 0ustar00jenkinsnogroup000000 000000 // // ip/address_v4_iterator.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V4_ITERATOR_HPP #define ASIO_IP_ADDRESS_V4_ITERATOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address_v4.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template class basic_address_iterator; /// An input iterator that can be used for traversing IPv4 addresses. /** * In addition to satisfying the input iterator requirements, this iterator * also supports decrement. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template <> class basic_address_iterator { public: /// The type of the elements pointed to by the iterator. typedef address_v4 value_type; /// Distance between two iterators. typedef std::ptrdiff_t difference_type; /// The type of a pointer to an element pointed to by the iterator. typedef const address_v4* pointer; /// The type of a reference to an element pointed to by the iterator. typedef const address_v4& reference; /// Denotes that the iterator satisfies the input iterator requirements. typedef std::input_iterator_tag iterator_category; /// Construct an iterator that points to the specified address. basic_address_iterator(const address_v4& addr) ASIO_NOEXCEPT : address_(addr) { } /// Copy constructor. basic_address_iterator( const basic_address_iterator& other) ASIO_NOEXCEPT : address_(other.address_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_address_iterator(basic_address_iterator&& other) ASIO_NOEXCEPT : address_(ASIO_MOVE_CAST(address_v4)(other.address_)) { } #endif // defined(ASIO_HAS_MOVE) /// Assignment operator. basic_address_iterator& operator=( const basic_address_iterator& other) ASIO_NOEXCEPT { address_ = other.address_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move assignment operator. 
basic_address_iterator& operator=( basic_address_iterator&& other) ASIO_NOEXCEPT { address_ = ASIO_MOVE_CAST(address_v4)(other.address_); return *this; } #endif // defined(ASIO_HAS_MOVE) /// Dereference the iterator. const address_v4& operator*() const ASIO_NOEXCEPT { return address_; } /// Dereference the iterator. const address_v4* operator->() const ASIO_NOEXCEPT { return &address_; } /// Pre-increment operator. basic_address_iterator& operator++() ASIO_NOEXCEPT { address_ = address_v4((address_.to_uint() + 1) & 0xFFFFFFFF); return *this; } /// Post-increment operator. basic_address_iterator operator++(int) ASIO_NOEXCEPT { basic_address_iterator tmp(*this); ++*this; return tmp; } /// Pre-decrement operator. basic_address_iterator& operator--() ASIO_NOEXCEPT { address_ = address_v4((address_.to_uint() - 1) & 0xFFFFFFFF); return *this; } /// Post-decrement operator. basic_address_iterator operator--(int) { basic_address_iterator tmp(*this); --*this; return tmp; } /// Compare two addresses for equality. friend bool operator==(const basic_address_iterator& a, const basic_address_iterator& b) { return a.address_ == b.address_; } /// Compare two addresses for inequality. friend bool operator!=(const basic_address_iterator& a, const basic_address_iterator& b) { return a.address_ != b.address_; } private: address_v4 address_; }; /// An input iterator that can be used for traversing IPv4 addresses. typedef basic_address_iterator address_v4_iterator; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ADDRESS_V4_ITERATOR_HPP galera-4-26.4.25/asio/asio/ip/address_v4_range.hpp000644 000164 177776 00000006175 15107057155 022746 0ustar00jenkinsnogroup000000 000000 // // ip/address_v4_range.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V4_RANGE_HPP #define ASIO_IP_ADDRESS_V4_RANGE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ip/address_v4_iterator.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { template class basic_address_range; /// Represents a range of IPv4 addresses. /** * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template <> class basic_address_range { public: /// The type of an iterator that points into the range. typedef basic_address_iterator iterator; /// Construct an empty range. basic_address_range() ASIO_NOEXCEPT : begin_(address_v4()), end_(address_v4()) { } /// Construct an range that represents the given range of addresses. explicit basic_address_range(const iterator& first, const iterator& last) ASIO_NOEXCEPT : begin_(first), end_(last) { } /// Copy constructor. basic_address_range(const basic_address_range& other) ASIO_NOEXCEPT : begin_(other.begin_), end_(other.end_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. basic_address_range(basic_address_range&& other) ASIO_NOEXCEPT : begin_(ASIO_MOVE_CAST(iterator)(other.begin_)), end_(ASIO_MOVE_CAST(iterator)(other.end_)) { } #endif // defined(ASIO_HAS_MOVE) /// Assignment operator. basic_address_range& operator=( const basic_address_range& other) ASIO_NOEXCEPT { begin_ = other.begin_; end_ = other.end_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move assignment operator. basic_address_range& operator=( basic_address_range&& other) ASIO_NOEXCEPT { begin_ = ASIO_MOVE_CAST(iterator)(other.begin_); end_ = ASIO_MOVE_CAST(iterator)(other.end_); return *this; } #endif // defined(ASIO_HAS_MOVE) /// Obtain an iterator that points to the start of the range. 
iterator begin() const ASIO_NOEXCEPT { return begin_; } /// Obtain an iterator that points to the end of the range. iterator end() const ASIO_NOEXCEPT { return end_; } /// Determine whether the range is empty. bool empty() const ASIO_NOEXCEPT { return size() == 0; } /// Return the size of the range. std::size_t size() const ASIO_NOEXCEPT { return end_->to_uint() - begin_->to_uint(); } /// Find an address in the range. iterator find(const address_v4& addr) const ASIO_NOEXCEPT { return addr >= *begin_ && addr < *end_ ? iterator(addr) : end_; } private: iterator begin_; iterator end_; }; /// Represents a range of IPv4 addresses. typedef basic_address_range address_v4_range; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_ADDRESS_V4_RANGE_HPP galera-4-26.4.25/asio/asio/ip/basic_resolver.hpp000644 000164 177776 00000122337 15107057155 022535 0ustar00jenkinsnogroup000000 000000 // // ip/basic_resolver.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_HPP #define ASIO_IP_BASIC_RESOLVER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/basic_resolver_results.hpp" #include "asio/ip/resolver_base.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/winrt_resolver_service.hpp" #else # include "asio/detail/resolver_service.hpp" #endif #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { #if !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL) #define ASIO_IP_BASIC_RESOLVER_FWD_DECL // Forward declaration with defaulted arguments. template class basic_resolver; #endif // !defined(ASIO_IP_BASIC_RESOLVER_FWD_DECL) /// Provides endpoint resolution functionality. /** * The basic_resolver class template provides the ability to resolve a query * to a list of endpoints. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver : public resolver_base { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The protocol type. typedef InternetProtocol protocol_type; /// The endpoint type. typedef typename InternetProtocol::endpoint endpoint_type; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated.) The query type. typedef basic_resolver_query query; /// (Deprecated.) 
The iterator type. typedef basic_resolver_iterator iterator; #endif // !defined(ASIO_NO_DEPRECATED) /// The results type. typedef basic_resolver_results results_type; /// Construct with executor. /** * This constructor creates a basic_resolver. * * @param ex The I/O executor that the resolver will use, by default, to * dispatch handlers for any asynchronous operations performed on the * resolver. */ explicit basic_resolver(const executor_type& ex) : impl_(ex) { } /// Construct with execution context. /** * This constructor creates a basic_resolver. * * @param context An execution context which provides the I/O executor that * the resolver will use, by default, to dispatch handlers for any * asynchronous operations performed on the resolver. */ template explicit basic_resolver(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_resolver from another. /** * This constructor moves a resolver from one object to another. * * @param other The other basic_resolver object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_resolver(const executor_type&) constructor. */ basic_resolver(basic_resolver&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_resolver from another. /** * This assignment operator moves a resolver from one object to another. * Cancels any outstanding asynchronous operations associated with the target * object. * * @param other The other basic_resolver object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_resolver(const executor_type&) constructor. 
*/ basic_resolver& operator=(basic_resolver&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the resolver. /** * This function destroys the resolver, cancelling any outstanding * asynchronous wait operations associated with the resolver as if by calling * @c cancel. */ ~basic_resolver() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Cancel any asynchronous operations that are waiting on the resolver. /** * This function forces the completion of any pending asynchronous * operations on the host resolver. The handler for each cancelled operation * will be invoked with the asio::error::operation_aborted error code. */ void cancel() { return impl_.get_service().cancel(impl_.get_implementation()); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use overload with separate host and service parameters.) /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve a query into a list of endpoint entries. * * @param q A query object that determines what endpoints will be returned. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. */ results_type resolve(const query& q) { asio::error_code ec; results_type r = impl_.get_service().resolve( impl_.get_implementation(), q, ec); asio::detail::throw_error(ec, "resolve"); return r; } /// (Deprecated: Use overload with separate host and service parameters.) /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve a query into a list of endpoint entries. * * @param q A query object that determines what endpoints will be returned. * * @param ec Set to indicate what error occurred, if any. 
* * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. */ results_type resolve(const query& q, asio::error_code& ec) { return impl_.get_service().resolve(impl_.get_implementation(), q, ec); } #endif // !defined(ASIO_NO_DEPRECATED) /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
*/ results_type resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service) { return resolve(host, service, resolver_base::flags()); } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param ec Set to indicate what error occurred, if any. * * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, asio::error_code& ec) { return resolve(host, service, resolver_base::flags(), ec); } /// Perform forward resolution of a query to a list of entries. 
/** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
*/ results_type resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags) { asio::error_code ec; basic_resolver_query q(static_cast(host), static_cast(service), resolve_flags); results_type r = impl_.get_service().resolve( impl_.get_implementation(), q, ec); asio::detail::throw_error(ec, "resolve"); return r; } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. * * @param ec Set to indicate what error occurred, if any. * * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). 
* * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags, asio::error_code& ec) { basic_resolver_query q(static_cast(host), static_cast(service), resolve_flags); return impl_.get_service().resolve(impl_.get_implementation(), q, ec); } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). 
* * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service) { return resolve(protocol, host, service, resolver_base::flags()); } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param ec Set to indicate what error occurred, if any. * * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). 
* * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, asio::error_code& ec) { return resolve(protocol, host, service, resolver_base::flags(), ec); } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. 
Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags) { asio::error_code ec; basic_resolver_query q( protocol, static_cast(host), static_cast(service), resolve_flags); results_type r = impl_.get_service().resolve( impl_.get_implementation(), q, ec); asio::detail::throw_error(ec, "resolve"); return r; } /// Perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. * * @param ec Set to indicate what error occurred, if any. 
* * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ results_type resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags, asio::error_code& ec) { basic_resolver_query q( protocol, static_cast(host), static_cast(service), resolve_flags); return impl_.get_service().resolve(impl_.get_implementation(), q, ec); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use overload with separate host and service parameters.) /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to asynchronously resolve a query into a list of * endpoint entries. * * @param q A query object that determines what endpoints will be returned. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(const query& q, ASIO_MOVE_ARG(ResolveHandler) handler) { return asio::async_initiate( initiate_async_resolve(), handler, this, q); } #endif // !defined(ASIO_NO_DEPRECATED) /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. 
* * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, ASIO_MOVE_ARG(ResolveHandler) handler) { return async_resolve(host, service, resolver_base::flags(), ASIO_MOVE_CAST(ResolveHandler)(handler)); } /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. 
* * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags, ASIO_MOVE_ARG(ResolveHandler) handler) { basic_resolver_query q(static_cast(host), static_cast(service), resolve_flags); return asio::async_initiate( initiate_async_resolve(), handler, this, q); } /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. 
* * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. 
Operating systems * may use additional locations when resolving service names. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, ASIO_MOVE_ARG(ResolveHandler) handler) { return async_resolve(protocol, host, service, resolver_base::flags(), ASIO_MOVE_CAST(ResolveHandler)(handler)); } /// Asynchronously perform forward resolution of a query to a list of entries. /** * This function is used to resolve host and service names into a list of * endpoint entries. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. See the @ref resolver_base documentation for the set of * available flags. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(const protocol_type& protocol, ASIO_STRING_VIEW_PARAM host, ASIO_STRING_VIEW_PARAM service, resolver_base::flags resolve_flags, ASIO_MOVE_ARG(ResolveHandler) handler) { basic_resolver_query q( protocol, static_cast(host), static_cast(service), resolve_flags); return asio::async_initiate( initiate_async_resolve(), handler, this, q); } /// Perform reverse resolution of an endpoint to a list of entries. /** * This function is used to resolve an endpoint into a list of endpoint * entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @returns A range object representing the list of endpoint entries. A * successful call to this function is guaranteed to return a non-empty * range. * * @throws asio::system_error Thrown on failure. 
*/ results_type resolve(const endpoint_type& e) { asio::error_code ec; results_type i = impl_.get_service().resolve( impl_.get_implementation(), e, ec); asio::detail::throw_error(ec, "resolve"); return i; } /// Perform reverse resolution of an endpoint to a list of entries. /** * This function is used to resolve an endpoint into a list of endpoint * entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @param ec Set to indicate what error occurred, if any. * * @returns A range object representing the list of endpoint entries. An * empty range is returned if an error occurs. A successful call to this * function is guaranteed to return a non-empty range. */ results_type resolve(const endpoint_type& e, asio::error_code& ec) { return impl_.get_service().resolve(impl_.get_implementation(), e, ec); } /// Asynchronously perform reverse resolution of an endpoint to a list of /// entries. /** * This function is used to asynchronously resolve an endpoint into a list of * endpoint entries. * * @param e An endpoint object that determines what endpoints will be * returned. * * @param handler The handler to be called when the resolve operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * resolver::results_type results // Resolved endpoints as a range. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * A successful resolve operation is guaranteed to pass a non-empty range to * the handler. 
*/ template ASIO_INITFN_RESULT_TYPE(ResolveHandler, void (asio::error_code, results_type)) async_resolve(const endpoint_type& e, ASIO_MOVE_ARG(ResolveHandler) handler) { return asio::async_initiate( initiate_async_resolve(), handler, this, e); } private: // Disallow copying and assignment. basic_resolver(const basic_resolver&) ASIO_DELETED; basic_resolver& operator=(const basic_resolver&) ASIO_DELETED; struct initiate_async_resolve { template void operator()(ASIO_MOVE_ARG(ResolveHandler) handler, basic_resolver* self, const Query& q) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ResolveHandler. ASIO_RESOLVE_HANDLER_CHECK( ResolveHandler, handler, results_type) type_check; asio::detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_resolve( self->impl_.get_implementation(), q, handler2.value, self->impl_.get_implementation_executor()); } }; # if defined(ASIO_WINDOWS_RUNTIME) asio::detail::io_object_impl< asio::detail::winrt_resolver_service, Executor> impl_; # else asio::detail::io_object_impl< asio::detail::resolver_service, Executor> impl_; # endif }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_HPP galera-4-26.4.25/asio/asio/ip/address.hpp000644 000164 177776 00000017014 15107057155 021153 0ustar00jenkinsnogroup000000 000000 // // ip/address.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_HPP #define ASIO_IP_ADDRESS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/throw_exception.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error_code.hpp" #include "asio/ip/address_v4.hpp" #include "asio/ip/address_v6.hpp" #include "asio/ip/bad_address_cast.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Implements version-independent IP addresses. /** * The asio::ip::address class provides the ability to use either IP * version 4 or version 6 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address { public: /// Default constructor. ASIO_DECL address() ASIO_NOEXCEPT; /// Construct an address from an IPv4 address. ASIO_DECL address( const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT; /// Construct an address from an IPv6 address. ASIO_DECL address( const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT; /// Copy constructor. ASIO_DECL address(const address& other) ASIO_NOEXCEPT; #if defined(ASIO_HAS_MOVE) /// Move constructor. ASIO_DECL address(address&& other) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. ASIO_DECL address& operator=(const address& other) ASIO_NOEXCEPT; #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. ASIO_DECL address& operator=(address&& other) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_MOVE) /// Assign from an IPv4 address. ASIO_DECL address& operator=( const asio::ip::address_v4& ipv4_address) ASIO_NOEXCEPT; /// Assign from an IPv6 address. 
ASIO_DECL address& operator=( const asio::ip::address_v6& ipv6_address) ASIO_NOEXCEPT; /// Get whether the address is an IP version 4 address. bool is_v4() const ASIO_NOEXCEPT { return type_ == ipv4; } /// Get whether the address is an IP version 6 address. bool is_v6() const ASIO_NOEXCEPT { return type_ == ipv6; } /// Get the address as an IP version 4 address. ASIO_DECL asio::ip::address_v4 to_v4() const; /// Get the address as an IP version 6 address. ASIO_DECL asio::ip::address_v6 to_v6() const; /// Get the address as a string. ASIO_DECL std::string to_string() const; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use other overload.) Get the address as a string. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// (Deprecated: Use make_address().) Create an address from an IPv4 address /// string in dotted decimal form, or from an IPv6 address in hexadecimal /// notation. static address from_string(const char* str); /// (Deprecated: Use make_address().) Create an address from an IPv4 address /// string in dotted decimal form, or from an IPv6 address in hexadecimal /// notation. static address from_string(const char* str, asio::error_code& ec); /// (Deprecated: Use make_address().) Create an address from an IPv4 address /// string in dotted decimal form, or from an IPv6 address in hexadecimal /// notation. static address from_string(const std::string& str); /// (Deprecated: Use make_address().) Create an address from an IPv4 address /// string in dotted decimal form, or from an IPv6 address in hexadecimal /// notation. static address from_string( const std::string& str, asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT; /// Determine whether the address is a multicast address. 
ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT; /// Compare two addresses for equality. ASIO_DECL friend bool operator==(const address& a1, const address& a2) ASIO_NOEXCEPT; /// Compare two addresses for inequality. friend bool operator!=(const address& a1, const address& a2) ASIO_NOEXCEPT { return !(a1 == a2); } /// Compare addresses for ordering. ASIO_DECL friend bool operator<(const address& a1, const address& a2) ASIO_NOEXCEPT; /// Compare addresses for ordering. friend bool operator>(const address& a1, const address& a2) ASIO_NOEXCEPT { return a2 < a1; } /// Compare addresses for ordering. friend bool operator<=(const address& a1, const address& a2) ASIO_NOEXCEPT { return !(a2 < a1); } /// Compare addresses for ordering. friend bool operator>=(const address& a1, const address& a2) ASIO_NOEXCEPT { return !(a1 < a2); } private: // The type of the address. enum { ipv4, ipv6 } type_; // The underlying IPv4 address. asio::ip::address_v4 ipv4_address_; // The underlying IPv6 address. asio::ip::address_v6 ipv6_address_; }; /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. /** * @relates address */ ASIO_DECL address make_address(const char* str); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. /** * @relates address */ ASIO_DECL address make_address(const char* str, asio::error_code& ec) ASIO_NOEXCEPT; /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. /** * @relates address */ ASIO_DECL address make_address(const std::string& str); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. 
/** * @relates address */ ASIO_DECL address make_address(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT; #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. /** * @relates address */ ASIO_DECL address make_address(string_view str); /// Create an address from an IPv4 address string in dotted decimal form, /// or from an IPv6 address in hexadecimal notation. /** * @relates address */ ASIO_DECL address make_address(string_view str, asio::error_code& ec) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. * * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. * * @relates asio::ip::address */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_HPP galera-4-26.4.25/asio/asio/ip/basic_resolver_query.hpp000644 000164 177776 00000022032 15107057155 023751 0ustar00jenkinsnogroup000000 000000 // // ip/basic_resolver_query.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_BASIC_RESOLVER_QUERY_HPP #define ASIO_IP_BASIC_RESOLVER_QUERY_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/socket_ops.hpp" #include "asio/ip/resolver_query_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// An query to be passed to a resolver. /** * The asio::ip::basic_resolver_query class template describes a query * that can be passed to a resolver. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ template class basic_resolver_query : public resolver_query_base { public: /// The protocol type associated with the endpoint query. typedef InternetProtocol protocol_type; /// Construct with specified service name for any protocol. /** * This constructor is typically used to perform name resolution for local * service binding. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for local service * binding. * * @note On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
*/ basic_resolver_query(const std::string& service, resolver_query_base::flags resolve_flags = passive | address_configured) : hints_(), host_name_(), service_name_(service) { typename InternetProtocol::endpoint endpoint; hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = PF_UNSPEC; hints_.ai_socktype = endpoint.protocol().type(); hints_.ai_protocol = endpoint.protocol().protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified service name for a given protocol. /** * This constructor is typically used to perform name resolution for local * service binding with a specific protocol version. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for local service * binding. * * @note On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ basic_resolver_query(const protocol_type& protocol, const std::string& service, resolver_query_base::flags resolve_flags = passive | address_configured) : hints_(), host_name_(), service_name_(service) { hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = protocol.family(); hints_.ai_socktype = protocol.type(); hints_.ai_protocol = protocol.protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified host name and service name for any protocol. 
/** * This constructor is typically used to perform name resolution for * communication with remote hosts. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). * * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. 
*/ basic_resolver_query(const std::string& host, const std::string& service, resolver_query_base::flags resolve_flags = address_configured) : hints_(), host_name_(host), service_name_(service) { typename InternetProtocol::endpoint endpoint; hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = ASIO_OS_DEF(AF_UNSPEC); hints_.ai_socktype = endpoint.protocol().type(); hints_.ai_protocol = endpoint.protocol().protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Construct with specified host name and service name for a given protocol. /** * This constructor is typically used to perform name resolution for * communication with remote hosts. * * @param protocol A protocol object, normally representing either the IPv4 or * IPv6 version of an internet protocol. * * @param host A string identifying a location. May be a descriptive name or * a numeric address string. If an empty string and the passive flag has been * specified, the resolved endpoints are suitable for local service binding. * If an empty string and passive is not specified, the resolved endpoints * will use the loopback address. * * @param service A string identifying the requested service. This may be a * descriptive name or a numeric string corresponding to a port number. May * be an empty string, in which case all resolved endpoints will have a port * number of 0. * * @param resolve_flags A set of flags that determine how name resolution * should be performed. The default flags are suitable for communication with * remote hosts. * * @note On POSIX systems, host names may be locally defined in the file * /etc/hosts. On Windows, host names may be defined in the file * c:\\windows\\system32\\drivers\\etc\\hosts. Remote host name * resolution is performed using DNS. Operating systems may use additional * locations when resolving host names (such as NETBIOS names on Windows). 
* * On POSIX systems, service names are typically defined in the file * /etc/services. On Windows, service names may be found in the file * c:\\windows\\system32\\drivers\\etc\\services. Operating systems * may use additional locations when resolving service names. */ basic_resolver_query(const protocol_type& protocol, const std::string& host, const std::string& service, resolver_query_base::flags resolve_flags = address_configured) : hints_(), host_name_(host), service_name_(service) { hints_.ai_flags = static_cast(resolve_flags); hints_.ai_family = protocol.family(); hints_.ai_socktype = protocol.type(); hints_.ai_protocol = protocol.protocol(); hints_.ai_addrlen = 0; hints_.ai_canonname = 0; hints_.ai_addr = 0; hints_.ai_next = 0; } /// Get the hints associated with the query. const asio::detail::addrinfo_type& hints() const { return hints_; } /// Get the host name associated with the query. std::string host_name() const { return host_name_; } /// Get the service name associated with the query. std::string service_name() const { return service_name_; } private: asio::detail::addrinfo_type hints_; std::string host_name_; std::string service_name_; }; } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IP_BASIC_RESOLVER_QUERY_HPP galera-4-26.4.25/asio/asio/ip/address_v4.hpp000644 000164 177776 00000022445 15107057155 021570 0ustar00jenkinsnogroup000000 000000 // // ip/address_v4.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IP_ADDRESS_V4_HPP #define ASIO_IP_ADDRESS_V4_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/array.hpp" #include "asio/detail/cstdint.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/winsock_init.hpp" #include "asio/error_code.hpp" #if !defined(ASIO_NO_IOSTREAM) # include #endif // !defined(ASIO_NO_IOSTREAM) #include "asio/detail/push_options.hpp" namespace asio { namespace ip { /// Implements IP version 4 style addresses. /** * The asio::ip::address_v4 class provides the ability to use and * manipulate IP version 4 addresses. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. */ class address_v4 { public: /// The type used to represent an address as an unsigned integer. typedef uint_least32_t uint_type; /// The type used to represent an address as an array of bytes. /** * @note This type is defined in terms of the C++0x template @c std::array * when it is available. Otherwise, it uses @c boost:array. */ #if defined(GENERATING_DOCUMENTATION) typedef array bytes_type; #else typedef asio::detail::array bytes_type; #endif /// Default constructor. address_v4() ASIO_NOEXCEPT { addr_.s_addr = 0; } /// Construct an address from raw bytes. ASIO_DECL explicit address_v4(const bytes_type& bytes); /// Construct an address from an unsigned integer in host byte order. ASIO_DECL explicit address_v4(uint_type addr); /// Copy constructor. address_v4(const address_v4& other) ASIO_NOEXCEPT : addr_(other.addr_) { } #if defined(ASIO_HAS_MOVE) /// Move constructor. address_v4(address_v4&& other) ASIO_NOEXCEPT : addr_(other.addr_) { } #endif // defined(ASIO_HAS_MOVE) /// Assign from another address. 
address_v4& operator=(const address_v4& other) ASIO_NOEXCEPT { addr_ = other.addr_; return *this; } #if defined(ASIO_HAS_MOVE) /// Move-assign from another address. address_v4& operator=(address_v4&& other) ASIO_NOEXCEPT { addr_ = other.addr_; return *this; } #endif // defined(ASIO_HAS_MOVE) /// Get the address in bytes, in network byte order. ASIO_DECL bytes_type to_bytes() const ASIO_NOEXCEPT; /// Get the address as an unsigned integer in host byte order ASIO_DECL uint_type to_uint() const ASIO_NOEXCEPT; #if !defined(ASIO_NO_DEPRECATED) /// Get the address as an unsigned long in host byte order ASIO_DECL unsigned long to_ulong() const; #endif // !defined(ASIO_NO_DEPRECATED) /// Get the address as a string in dotted decimal format. ASIO_DECL std::string to_string() const; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use other overload.) Get the address as a string in dotted /// decimal format. ASIO_DECL std::string to_string(asio::error_code& ec) const; /// (Deprecated: Use make_address_v4().) Create an address from an IP address /// string in dotted decimal form. static address_v4 from_string(const char* str); /// (Deprecated: Use make_address_v4().) Create an address from an IP address /// string in dotted decimal form. static address_v4 from_string( const char* str, asio::error_code& ec); /// (Deprecated: Use make_address_v4().) Create an address from an IP address /// string in dotted decimal form. static address_v4 from_string(const std::string& str); /// (Deprecated: Use make_address_v4().) Create an address from an IP address /// string in dotted decimal form. static address_v4 from_string( const std::string& str, asio::error_code& ec); #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the address is a loopback address. ASIO_DECL bool is_loopback() const ASIO_NOEXCEPT; /// Determine whether the address is unspecified. ASIO_DECL bool is_unspecified() const ASIO_NOEXCEPT; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use network_v4 class.) 
Determine whether the address is a /// class A address. ASIO_DECL bool is_class_a() const; /// (Deprecated: Use network_v4 class.) Determine whether the address is a /// class B address. ASIO_DECL bool is_class_b() const; /// (Deprecated: Use network_v4 class.) Determine whether the address is a /// class C address. ASIO_DECL bool is_class_c() const; #endif // !defined(ASIO_NO_DEPRECATED) /// Determine whether the address is a multicast address. ASIO_DECL bool is_multicast() const ASIO_NOEXCEPT; /// Compare two addresses for equality. friend bool operator==(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.addr_.s_addr == a2.addr_.s_addr; } /// Compare two addresses for inequality. friend bool operator!=(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.addr_.s_addr != a2.addr_.s_addr; } /// Compare addresses for ordering. friend bool operator<(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.to_uint() < a2.to_uint(); } /// Compare addresses for ordering. friend bool operator>(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.to_uint() > a2.to_uint(); } /// Compare addresses for ordering. friend bool operator<=(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.to_uint() <= a2.to_uint(); } /// Compare addresses for ordering. friend bool operator>=(const address_v4& a1, const address_v4& a2) ASIO_NOEXCEPT { return a1.to_uint() >= a2.to_uint(); } /// Obtain an address object that represents any address. static address_v4 any() ASIO_NOEXCEPT { return address_v4(); } /// Obtain an address object that represents the loopback address. static address_v4 loopback() ASIO_NOEXCEPT { return address_v4(0x7F000001); } /// Obtain an address object that represents the broadcast address. static address_v4 broadcast() ASIO_NOEXCEPT { return address_v4(0xFFFFFFFF); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use network_v4 class.) 
Obtain an address object that /// represents the broadcast address that corresponds to the specified /// address and netmask. ASIO_DECL static address_v4 broadcast( const address_v4& addr, const address_v4& mask); /// (Deprecated: Use network_v4 class.) Obtain the netmask that corresponds /// to the address, based on its address class. ASIO_DECL static address_v4 netmask(const address_v4& addr); #endif // !defined(ASIO_NO_DEPRECATED) private: // The underlying IPv4 address. asio::detail::in4_addr_type addr_; }; /// Create an IPv4 address from raw bytes in network order. /** * @relates address_v4 */ inline address_v4 make_address_v4(const address_v4::bytes_type& bytes) { return address_v4(bytes); } /// Create an IPv4 address from an unsigned integer in host byte order. /** * @relates address_v4 */ inline address_v4 make_address_v4(address_v4::uint_type addr) { return address_v4(addr); } /// Create an IPv4 address from an IP address string in dotted decimal form. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(const char* str); /// Create an IPv4 address from an IP address string in dotted decimal form. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(const char* str, asio::error_code& ec) ASIO_NOEXCEPT; /// Create an IPv4 address from an IP address string in dotted decimal form. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(const std::string& str); /// Create an IPv4 address from an IP address string in dotted decimal form. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(const std::string& str, asio::error_code& ec) ASIO_NOEXCEPT; #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create an IPv4 address from an IP address string in dotted decimal form. /** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(string_view str); /// Create an IPv4 address from an IP address string in dotted decimal form. 
/** * @relates address_v4 */ ASIO_DECL address_v4 make_address_v4(string_view str, asio::error_code& ec) ASIO_NOEXCEPT; #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) #if !defined(ASIO_NO_IOSTREAM) /// Output an address as a string. /** * Used to output a human-readable string for a specified address. * * @param os The output stream to which the string will be written. * * @param addr The address to be written. * * @return The output stream. * * @relates asio::ip::address_v4 */ template std::basic_ostream& operator<<( std::basic_ostream& os, const address_v4& addr); #endif // !defined(ASIO_NO_IOSTREAM) } // namespace ip } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ip/impl/address_v4.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ip/impl/address_v4.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_IP_ADDRESS_V4_HPP galera-4-26.4.25/asio/asio/is_write_buffered.hpp000644 000164 177776 00000003046 15107057155 022605 0ustar00jenkinsnogroup000000 000000 // // is_write_buffered.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_IS_WRITE_BUFFERED_HPP #define ASIO_IS_WRITE_BUFFERED_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/buffered_write_stream_fwd.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace detail { template char is_write_buffered_helper(buffered_stream* s); template char is_write_buffered_helper(buffered_write_stream* s); struct is_write_buffered_big_type { char data[10]; }; is_write_buffered_big_type is_write_buffered_helper(...); } // namespace detail /// The is_write_buffered class is a traits class that may be used to determine /// whether a stream type supports buffering of written data. template class is_write_buffered { public: #if defined(GENERATING_DOCUMENTATION) /// The value member is true only if the Stream type supports buffering of /// written data. static const bool value; #else ASIO_STATIC_CONSTANT(bool, value = sizeof(detail::is_write_buffered_helper((Stream*)0)) == 1); #endif }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_IS_WRITE_BUFFERED_HPP galera-4-26.4.25/asio/asio/compose.hpp000644 000164 177776 00000010327 15107057155 020563 0ustar00jenkinsnogroup000000 000000 // // compose.hpp // ~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_COMPOSE_HPP #define ASIO_COMPOSE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/push_options.hpp" namespace asio { #if defined(ASIO_HAS_VARIADIC_TEMPLATES) \ || defined(GENERATING_DOCUMENTATION) /// Launch an asynchronous operation with a stateful implementation. /** * The async_compose function simplifies the implementation of composed * asynchronous operations automatically wrapping a stateful function object * with a conforming intermediate completion handler. * * @param implementation A function object that contains the implementation of * the composed asynchronous operation. The first argument to the function * object is a non-const reference to the enclosing intermediate completion * handler. The remaining arguments are any arguments that originate from the * completion handlers of any asynchronous operations performed by the * implementation. * @param token The completion token. * * @param io_objects_or_executors Zero or more I/O objects or I/O executors for * which outstanding work must be maintained. 
* * @par Example: * * @code struct async_echo_implementation * { * tcp::socket& socket_; * asio::mutable_buffer buffer_; * enum { starting, reading, writing } state_; * * template * void operator()(Self& self, * asio::error_code error = {}, * std::size_t n = 0) * { * switch (state_) * { * case starting: * state_ = reading; * socket_.async_read_some( * buffer_, std::move(self)); * break; * case reading: * if (error) * { * self.complete(error, 0); * } * else * { * state_ = writing; * asio::async_write(socket_, buffer_, * asio::transfer_exactly(n), * std::move(self)); * } * break; * case writing: * self.complete(error, n); * break; * } * } * }; * * template * auto async_echo(tcp::socket& socket, * asio::mutable_buffer buffer, * CompletionToken&& token) -> * typename asio::async_result< * typename std::decay::type, * void(asio::error_code, std::size_t)>::return_type * { * return asio::async_compose( * async_echo_implementation{socket, buffer, * async_echo_implementation::starting}, * token, socket); * } @endcode */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) async_compose(ASIO_MOVE_ARG(Implementation) implementation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, ASIO_MOVE_ARG(IoObjectsOrExecutors)... 
io_objects_or_executors); #else // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) template ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) async_compose(ASIO_MOVE_ARG(Implementation) implementation, ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token); #define ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \ template \ ASIO_INITFN_RESULT_TYPE(CompletionToken, Signature) \ async_compose(ASIO_MOVE_ARG(Implementation) implementation, \ ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \ ASIO_VARIADIC_MOVE_PARAMS(n)); /**/ ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_ASYNC_COMPOSE_DEF) #undef ASIO_PRIVATE_ASYNC_COMPOSE_DEF #endif // defined(ASIO_HAS_VARIADIC_TEMPLATES) // || defined(GENERATING_DOCUMENTATION) } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/compose.hpp" #endif // ASIO_COMPOSE_HPP galera-4-26.4.25/asio/asio/basic_streambuf.hpp000644 000164 177776 00000032212 15107057155 022244 0ustar00jenkinsnogroup000000 000000 // // basic_streambuf.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_STREAMBUF_HPP #define ASIO_BASIC_STREAMBUF_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include #include #include #include #include "asio/basic_streambuf_fwd.hpp" #include "asio/buffer.hpp" #include "asio/detail/limits.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Automatically resizable buffer class based on std::streambuf. 
/** * The @c basic_streambuf class is derived from @c std::streambuf to associate * the streambuf's input and output sequences with one or more character * arrays. These character arrays are internal to the @c basic_streambuf * object, but direct access to the array elements is provided to permit them * to be used efficiently with I/O operations. Characters written to the output * sequence of a @c basic_streambuf object are appended to the input sequence * of the same object. * * The @c basic_streambuf class's public interface is intended to permit the * following implementation strategies: * * @li A single contiguous character array, which is reallocated as necessary * to accommodate changes in the size of the character sequence. This is the * implementation approach currently used in Asio. * * @li A sequence of one or more character arrays, where each array is of the * same size. Additional character array objects are appended to the sequence * to accommodate changes in the size of the character sequence. * * @li A sequence of one or more character arrays of varying sizes. Additional * character array objects are appended to the sequence to accommodate changes * in the size of the character sequence. * * The constructor for basic_streambuf accepts a @c size_t argument specifying * the maximum of the sum of the sizes of the input sequence and output * sequence. During the lifetime of the @c basic_streambuf object, the following * invariant holds: * @code size() <= max_size()@endcode * Any member function that would, if successful, cause the invariant to be * violated shall throw an exception of class @c std::length_error. * * The constructor for @c basic_streambuf takes an Allocator argument. A copy * of this argument is used for any memory allocation performed, by the * constructor and by all member functions, during the lifetime of each @c * basic_streambuf object. 
* * @par Examples * Writing directly from an streambuf to a socket: * @code * asio::streambuf b; * std::ostream os(&b); * os << "Hello, World!\n"; * * // try sending some data in input sequence * size_t n = sock.send(b.data()); * * b.consume(n); // sent data is removed from input sequence * @endcode * * Reading from a socket directly into a streambuf: * @code * asio::streambuf b; * * // reserve 512 bytes in output sequence * asio::streambuf::mutable_buffers_type bufs = b.prepare(512); * * size_t n = sock.receive(bufs); * * // received data is "committed" from output sequence to input sequence * b.commit(n); * * std::istream is(&b); * std::string s; * is >> s; * @endcode */ #if defined(GENERATING_DOCUMENTATION) template > #else template #endif class basic_streambuf : public std::streambuf, private noncopyable { public: #if defined(GENERATING_DOCUMENTATION) /// The type used to represent the input sequence as a list of buffers. typedef implementation_defined const_buffers_type; /// The type used to represent the output sequence as a list of buffers. typedef implementation_defined mutable_buffers_type; #else typedef ASIO_CONST_BUFFER const_buffers_type; typedef ASIO_MUTABLE_BUFFER mutable_buffers_type; #endif /// Construct a basic_streambuf object. /** * Constructs a streambuf with the specified maximum size. The initial size * of the streambuf's input sequence is 0. */ explicit basic_streambuf( std::size_t maximum_size = (std::numeric_limits::max)(), const Allocator& allocator = Allocator()) : max_size_(maximum_size), buffer_(allocator) { std::size_t pend = (std::min)(max_size_, buffer_delta); buffer_.resize((std::max)(pend, 1)); setg(&buffer_[0], &buffer_[0], &buffer_[0]); setp(&buffer_[0], &buffer_[0] + pend); } /// Get the size of the input sequence. /** * @returns The size of the input sequence. 
The value is equal to that * calculated for @c s in the following code: * @code * size_t s = 0; * const_buffers_type bufs = data(); * const_buffers_type::const_iterator i = bufs.begin(); * while (i != bufs.end()) * { * const_buffer buf(*i++); * s += buf.size(); * } * @endcode */ std::size_t size() const ASIO_NOEXCEPT { return pptr() - gptr(); } /// Get the maximum size of the basic_streambuf. /** * @returns The allowed maximum of the sum of the sizes of the input sequence * and output sequence. */ std::size_t max_size() const ASIO_NOEXCEPT { return max_size_; } /// Get the current capacity of the basic_streambuf. /** * @returns The current total capacity of the streambuf, i.e. for both the * input sequence and output sequence. */ std::size_t capacity() const ASIO_NOEXCEPT { return buffer_.capacity(); } /// Get a list of buffers that represents the input sequence. /** * @returns An object of type @c const_buffers_type that satisfies * ConstBufferSequence requirements, representing all character arrays in the * input sequence. * * @note The returned object is invalidated by any @c basic_streambuf member * function that modifies the input sequence or output sequence. */ const_buffers_type data() const ASIO_NOEXCEPT { return asio::buffer(asio::const_buffer(gptr(), (pptr() - gptr()) * sizeof(char_type))); } /// Get a list of buffers that represents the output sequence, with the given /// size. /** * Ensures that the output sequence can accommodate @c n characters, * reallocating character array objects as necessary. * * @returns An object of type @c mutable_buffers_type that satisfies * MutableBufferSequence requirements, representing character array objects * at the start of the output sequence such that the sum of the buffer sizes * is @c n. * * @throws std::length_error If size() + n > max_size(). * * @note The returned object is invalidated by any @c basic_streambuf member * function that modifies the input sequence or output sequence. 
*/ mutable_buffers_type prepare(std::size_t n) { reserve(n); return asio::buffer(asio::mutable_buffer( pptr(), n * sizeof(char_type))); } /// Move characters from the output sequence to the input sequence. /** * Appends @c n characters from the start of the output sequence to the input * sequence. The beginning of the output sequence is advanced by @c n * characters. * * Requires a preceding call prepare(x) where x >= n, and * no intervening operations that modify the input or output sequence. * * @note If @c n is greater than the size of the output sequence, the entire * output sequence is moved to the input sequence and no error is issued. */ void commit(std::size_t n) { n = std::min(n, epptr() - pptr()); pbump(static_cast(n)); setg(eback(), gptr(), pptr()); } /// Remove characters from the input sequence. /** * Removes @c n characters from the beginning of the input sequence. * * @note If @c n is greater than the size of the input sequence, the entire * input sequence is consumed and no error is issued. */ void consume(std::size_t n) { if (egptr() < pptr()) setg(&buffer_[0], gptr(), pptr()); if (gptr() + n > pptr()) n = pptr() - gptr(); gbump(static_cast(n)); } protected: enum { buffer_delta = 128 }; /// Override std::streambuf behaviour. /** * Behaves according to the specification of @c std::streambuf::underflow(). */ int_type underflow() { if (gptr() < pptr()) { setg(&buffer_[0], gptr(), pptr()); return traits_type::to_int_type(*gptr()); } else { return traits_type::eof(); } } /// Override std::streambuf behaviour. /** * Behaves according to the specification of @c std::streambuf::overflow(), * with the specialisation that @c std::length_error is thrown if appending * the character to the input sequence would require the condition * size() > max_size() to be true. 
*/ int_type overflow(int_type c) { if (!traits_type::eq_int_type(c, traits_type::eof())) { if (pptr() == epptr()) { std::size_t buffer_size = pptr() - gptr(); if (buffer_size < max_size_ && max_size_ - buffer_size < buffer_delta) { reserve(max_size_ - buffer_size); } else { reserve(buffer_delta); } } *pptr() = traits_type::to_char_type(c); pbump(1); return c; } return traits_type::not_eof(c); } void reserve(std::size_t n) { // Get current stream positions as offsets. std::size_t gnext = gptr() - &buffer_[0]; std::size_t pnext = pptr() - &buffer_[0]; std::size_t pend = epptr() - &buffer_[0]; // Check if there is already enough space in the put area. if (n <= pend - pnext) { return; } // Shift existing contents of get area to start of buffer. if (gnext > 0) { pnext -= gnext; std::memmove(&buffer_[0], &buffer_[0] + gnext, pnext); } // Ensure buffer is large enough to hold at least the specified size. if (n > pend - pnext) { if (n <= max_size_ && pnext <= max_size_ - n) { pend = pnext + n; buffer_.resize((std::max)(pend, 1)); } else { std::length_error ex("asio::streambuf too long"); asio::detail::throw_exception(ex); } } // Update stream positions. setg(&buffer_[0], &buffer_[0], &buffer_[0] + pnext); setp(&buffer_[0] + pnext, &buffer_[0] + pend); } private: std::size_t max_size_; std::vector buffer_; // Helper function to get the preferred size for reading data. friend std::size_t read_size_helper( basic_streambuf& sb, std::size_t max_size) { return std::min( std::max(512, sb.buffer_.capacity() - sb.size()), std::min(max_size, sb.max_size() - sb.size())); } }; /// Adapts basic_streambuf to the dynamic buffer sequence type requirements. #if defined(GENERATING_DOCUMENTATION) template > #else template #endif class basic_streambuf_ref { public: /// The type used to represent the input sequence as a list of buffers. typedef typename basic_streambuf::const_buffers_type const_buffers_type; /// The type used to represent the output sequence as a list of buffers. 
typedef typename basic_streambuf::mutable_buffers_type mutable_buffers_type; /// Construct a basic_streambuf_ref for the given basic_streambuf object. explicit basic_streambuf_ref(basic_streambuf& sb) : sb_(sb) { } /// Copy construct a basic_streambuf_ref. basic_streambuf_ref(const basic_streambuf_ref& other) ASIO_NOEXCEPT : sb_(other.sb_) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move construct a basic_streambuf_ref. basic_streambuf_ref(basic_streambuf_ref&& other) ASIO_NOEXCEPT : sb_(other.sb_) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Get the size of the input sequence. std::size_t size() const ASIO_NOEXCEPT { return sb_.size(); } /// Get the maximum size of the dynamic buffer. std::size_t max_size() const ASIO_NOEXCEPT { return sb_.max_size(); } /// Get the current capacity of the dynamic buffer. std::size_t capacity() const ASIO_NOEXCEPT { return sb_.capacity(); } /// Get a list of buffers that represents the input sequence. const_buffers_type data() const ASIO_NOEXCEPT { return sb_.data(); } /// Get a list of buffers that represents the output sequence, with the given /// size. mutable_buffers_type prepare(std::size_t n) { return sb_.prepare(n); } /// Move bytes from the output sequence to the input sequence. void commit(std::size_t n) { return sb_.commit(n); } /// Remove characters from the input sequence. void consume(std::size_t n) { return sb_.consume(n); } private: basic_streambuf& sb_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_BASIC_STREAMBUF_HPP galera-4-26.4.25/asio/asio/buffered_stream_fwd.hpp000644 000164 177776 00000001144 15107057155 023110 0ustar00jenkinsnogroup000000 000000 // // buffered_stream_fwd.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFERED_STREAM_FWD_HPP #define ASIO_BUFFERED_STREAM_FWD_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) namespace asio { template class buffered_stream; } // namespace asio #endif // ASIO_BUFFERED_STREAM_FWD_HPP galera-4-26.4.25/asio/asio/system_context.hpp000644 000164 177776 00000004125 15107057155 022205 0ustar00jenkinsnogroup000000 000000 // // system_context.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SYSTEM_CONTEXT_HPP #define ASIO_SYSTEM_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/scheduler.hpp" #include "asio/detail/thread_group.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { class system_executor; /// The executor context for the system executor. class system_context : public execution_context { public: /// The executor type associated with the context. typedef system_executor executor_type; /// Destructor shuts down all threads in the system thread pool. ASIO_DECL ~system_context(); /// Obtain an executor for the context. executor_type get_executor() ASIO_NOEXCEPT; /// Signal all threads in the system thread pool to stop. ASIO_DECL void stop(); /// Determine whether the system thread pool has been stopped. ASIO_DECL bool stopped() const ASIO_NOEXCEPT; /// Join all threads in the system thread pool. ASIO_DECL void join(); #if defined(GENERATING_DOCUMENTATION) private: #endif // defined(GENERATING_DOCUMENTATION) // Constructor creates all threads in the system thread pool. 
ASIO_DECL system_context(); private: friend class system_executor; struct thread_function; // Helper function to create the underlying scheduler. ASIO_DECL detail::scheduler& add_scheduler(detail::scheduler* s); // The underlying scheduler. detail::scheduler& scheduler_; // The threads in the system thread pool. detail::thread_group threads_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/system_context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/system_context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SYSTEM_CONTEXT_HPP galera-4-26.4.25/asio/asio/socket_base.hpp000644 000164 177776 00000035456 15107057155 021412 0ustar00jenkinsnogroup000000 000000 // // socket_base.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SOCKET_BASE_HPP #define ASIO_SOCKET_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/io_control.hpp" #include "asio/detail/socket_option.hpp" #include "asio/detail/socket_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// The socket_base class is used as a base for the basic_stream_socket and /// basic_datagram_socket class templates so that we have a common place to /// define the shutdown_type and enum. class socket_base { public: /// Different ways a socket may be shutdown. enum shutdown_type { #if defined(GENERATING_DOCUMENTATION) /// Shutdown the receive side of the socket. shutdown_receive = implementation_defined, /// Shutdown the send side of the socket. shutdown_send = implementation_defined, /// Shutdown both send and receive on the socket. 
shutdown_both = implementation_defined #else shutdown_receive = ASIO_OS_DEF(SHUT_RD), shutdown_send = ASIO_OS_DEF(SHUT_WR), shutdown_both = ASIO_OS_DEF(SHUT_RDWR) #endif }; /// Bitmask type for flags that can be passed to send and receive operations. typedef int message_flags; #if defined(GENERATING_DOCUMENTATION) /// Peek at incoming data without removing it from the input queue. static const int message_peek = implementation_defined; /// Process out-of-band data. static const int message_out_of_band = implementation_defined; /// Specify that the data should not be subject to routing. static const int message_do_not_route = implementation_defined; /// Specifies that the data marks the end of a record. static const int message_end_of_record = implementation_defined; #else ASIO_STATIC_CONSTANT(int, message_peek = ASIO_OS_DEF(MSG_PEEK)); ASIO_STATIC_CONSTANT(int, message_out_of_band = ASIO_OS_DEF(MSG_OOB)); ASIO_STATIC_CONSTANT(int, message_do_not_route = ASIO_OS_DEF(MSG_DONTROUTE)); ASIO_STATIC_CONSTANT(int, message_end_of_record = ASIO_OS_DEF(MSG_EOR)); #endif /// Wait types. /** * For use with basic_socket::wait() and basic_socket::async_wait(). */ enum wait_type { /// Wait for a socket to become ready to read. wait_read, /// Wait for a socket to become ready to write. wait_write, /// Wait for a socket to have error conditions pending. wait_error }; /// Socket option to permit sending of broadcast messages. /** * Implements the SOL_SOCKET/SO_BROADCAST socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::socket_base::broadcast option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::socket_base::broadcast option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined broadcast; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_BROADCAST)> broadcast; #endif /// Socket option to enable socket-level debugging. /** * Implements the SOL_SOCKET/SO_DEBUG socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::debug option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::debug option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined debug; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DEBUG)> debug; #endif /// Socket option to prevent routing, use local interfaces only. /** * Implements the SOL_SOCKET/SO_DONTROUTE socket option. * * @par Examples * Setting the option: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::socket_base::do_not_route option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::udp::socket socket(my_context); * ... * asio::socket_base::do_not_route option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined do_not_route; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_DONTROUTE)> do_not_route; #endif /// Socket option to send keep-alives. /** * Implements the SOL_SOCKET/SO_KEEPALIVE socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... 
* asio::socket_base::keep_alive option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::keep_alive option; * socket.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined keep_alive; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_KEEPALIVE)> keep_alive; #endif /// Socket option for the send buffer size of a socket. /** * Implements the SOL_SOCKET/SO_SNDBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::send_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::send_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDBUF)> send_buffer_size; #endif /// Socket option for the send low watermark. /** * Implements the SOL_SOCKET/SO_SNDLOWAT socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::send_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::send_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined send_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_SNDLOWAT)> send_low_watermark; #endif /// Socket option for the receive buffer size of a socket. /** * Implements the SOL_SOCKET/SO_RCVBUF socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::receive_buffer_size option(8192); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::receive_buffer_size option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_buffer_size; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVBUF)> receive_buffer_size; #endif /// Socket option for the receive low watermark. /** * Implements the SOL_SOCKET/SO_RCVLOWAT socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::receive_low_watermark option(1024); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::receive_low_watermark option; * socket.get_option(option); * int size = option.value(); * @endcode * * @par Concepts: * Socket_Option, Integer_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined receive_low_watermark; #else typedef asio::detail::socket_option::integer< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_RCVLOWAT)> receive_low_watermark; #endif /// Socket option to allow the socket to be bound to an address that is /// already in use. /** * Implements the SOL_SOCKET/SO_REUSEADDR socket option. 
* * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::socket_base::reuse_address option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::socket_base::reuse_address option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined reuse_address; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_REUSEADDR)> reuse_address; #endif /// Socket option to specify whether the socket lingers on close if unsent /// data is present. /** * Implements the SOL_SOCKET/SO_LINGER socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::linger option(true, 30); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::linger option; * socket.get_option(option); * bool is_set = option.enabled(); * unsigned short timeout = option.timeout(); * @endcode * * @par Concepts: * Socket_Option, Linger_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined linger; #else typedef asio::detail::socket_option::linger< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_LINGER)> linger; #endif /// Socket option for putting received out-of-band data inline. /** * Implements the SOL_SOCKET/SO_OOBINLINE socket option. * * @par Examples * Setting the option: * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::out_of_band_inline option(true); * socket.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::socket socket(my_context); * ... 
* asio::socket_base::out_of_band_inline option; * socket.get_option(option); * bool value = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined out_of_band_inline; #else typedef asio::detail::socket_option::boolean< ASIO_OS_DEF(SOL_SOCKET), ASIO_OS_DEF(SO_OOBINLINE)> out_of_band_inline; #endif /// Socket option to report aborted connections on accept. /** * Implements a custom socket option that determines whether or not an accept * operation is permitted to fail with asio::error::connection_aborted. * By default the option is false. * * @par Examples * Setting the option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::socket_base::enable_connection_aborted option(true); * acceptor.set_option(option); * @endcode * * @par * Getting the current option value: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::socket_base::enable_connection_aborted option; * acceptor.get_option(option); * bool is_set = option.value(); * @endcode * * @par Concepts: * Socket_Option, Boolean_Socket_Option. */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined enable_connection_aborted; #else typedef asio::detail::socket_option::boolean< asio::detail::custom_socket_option_level, asio::detail::enable_connection_aborted_option> enable_connection_aborted; #endif /// IO control command to get the amount of data that can be read without /// blocking. /** * Implements the FIONREAD IO control command. * * @par Example * @code * asio::ip::tcp::socket socket(my_context); * ... * asio::socket_base::bytes_readable command(true); * socket.io_control(command); * std::size_t bytes_readable = command.get(); * @endcode * * @par Concepts: * IO_Control_Command, Size_IO_Control_Command. 
*/ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined bytes_readable; #else typedef asio::detail::io_control::bytes_readable bytes_readable; #endif /// The maximum length of the queue of pending incoming connections. #if defined(GENERATING_DOCUMENTATION) static const int max_listen_connections = implementation_defined; #else ASIO_STATIC_CONSTANT(int, max_listen_connections = ASIO_OS_DEF(SOMAXCONN)); #endif #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use max_listen_connections.) The maximum length of the queue /// of pending incoming connections. #if defined(GENERATING_DOCUMENTATION) static const int max_connections = implementation_defined; #else ASIO_STATIC_CONSTANT(int, max_connections = ASIO_OS_DEF(SOMAXCONN)); #endif #endif // !defined(ASIO_NO_DEPRECATED) protected: /// Protected destructor to prevent deletion through this type. ~socket_base() { } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SOCKET_BASE_HPP galera-4-26.4.25/asio/asio/handler_continuation_hook.hpp000644 000164 177776 00000002567 15107057155 024354 0ustar00jenkinsnogroup000000 000000 // // handler_continuation_hook.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_CONTINUATION_HOOK_HPP #define ASIO_HANDLER_CONTINUATION_HOOK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Default continuation function for handlers. /** * Asynchronous operations may represent a continuation of the asynchronous * control flow associated with the current handler. The implementation can use * this knowledge to optimise scheduling of the handler. 
* * Implement asio_handler_is_continuation for your own handlers to indicate * when a handler represents a continuation. * * The default implementation of the continuation hook returns false. * * @par Example * @code * class my_handler; * * bool asio_handler_is_continuation(my_handler* context) * { * return true; * } * @endcode */ inline bool asio_handler_is_continuation(...) { return false; } } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_HANDLER_CONTINUATION_HOOK_HPP galera-4-26.4.25/asio/asio/ssl/000755 000164 177776 00000000000 15107057160 017177 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ssl/context_base.hpp000644 000164 177776 00000012177 15107057155 022402 0ustar00jenkinsnogroup000000 000000 // // ssl/context_base.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_BASE_HPP #define ASIO_SSL_CONTEXT_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The context_base class is used as a base for the basic_context class /// template so that we have a common place to define various enums. class context_base { public: /// Different methods supported by a context. enum method { /// Generic SSL version 2. sslv2, /// SSL version 2 client. sslv2_client, /// SSL version 2 server. sslv2_server, /// Generic SSL version 3. sslv3, /// SSL version 3 client. sslv3_client, /// SSL version 3 server. sslv3_server, /// Generic TLS version 1. tlsv1, /// TLS version 1 client. tlsv1_client, /// TLS version 1 server. tlsv1_server, /// Generic SSL/TLS. sslv23, /// SSL/TLS client. 
sslv23_client, /// SSL/TLS server. sslv23_server, /// Generic TLS version 1.1. tlsv11, /// TLS version 1.1 client. tlsv11_client, /// TLS version 1.1 server. tlsv11_server, /// Generic TLS version 1.2. tlsv12, /// TLS version 1.2 client. tlsv12_client, /// TLS version 1.2 server. tlsv12_server, /// Generic TLS version 1.3. tlsv13, /// TLS version 1.3 client. tlsv13_client, /// TLS version 1.3 server. tlsv13_server, /// Generic TLS. tls, /// TLS client. tls_client, /// TLS server. tls_server }; /// Bitmask type for SSL options. typedef long options; #if defined(GENERATING_DOCUMENTATION) /// Implement various bug workarounds. static const long default_workarounds = implementation_defined; /// Always create a new key when using tmp_dh parameters. static const long single_dh_use = implementation_defined; /// Disable SSL v2. static const long no_sslv2 = implementation_defined; /// Disable SSL v3. static const long no_sslv3 = implementation_defined; /// Disable TLS v1. static const long no_tlsv1 = implementation_defined; /// Disable TLS v1.1. static const long no_tlsv1_1 = implementation_defined; /// Disable TLS v1.2. static const long no_tlsv1_2 = implementation_defined; /// Disable TLS v1.3. static const long no_tlsv1_3 = implementation_defined; /// Disable compression. Compression is disabled by default. 
static const long no_compression = implementation_defined; #else ASIO_STATIC_CONSTANT(long, default_workarounds = SSL_OP_ALL); ASIO_STATIC_CONSTANT(long, single_dh_use = SSL_OP_SINGLE_DH_USE); ASIO_STATIC_CONSTANT(long, no_sslv2 = SSL_OP_NO_SSLv2); ASIO_STATIC_CONSTANT(long, no_sslv3 = SSL_OP_NO_SSLv3); ASIO_STATIC_CONSTANT(long, no_tlsv1 = SSL_OP_NO_TLSv1); # if defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = SSL_OP_NO_TLSv1_1); # else // defined(SSL_OP_NO_TLSv1_1) ASIO_STATIC_CONSTANT(long, no_tlsv1_1 = 0x10000000L); # endif // defined(SSL_OP_NO_TLSv1_1) # if defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = SSL_OP_NO_TLSv1_2); # else // defined(SSL_OP_NO_TLSv1_2) ASIO_STATIC_CONSTANT(long, no_tlsv1_2 = 0x08000000L); # endif // defined(SSL_OP_NO_TLSv1_2) # if defined(SSL_OP_NO_TLSv1_3) ASIO_STATIC_CONSTANT(long, no_tlsv1_3 = SSL_OP_NO_TLSv1_3); # else // defined(SSL_OP_NO_TLSv1_3) ASIO_STATIC_CONSTANT(long, no_tlsv1_3 = 0x20000000L); # endif // defined(SSL_OP_NO_TLSv1_3) # if defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = SSL_OP_NO_COMPRESSION); # else // defined(SSL_OP_NO_COMPRESSION) ASIO_STATIC_CONSTANT(long, no_compression = 0x20000L); # endif // defined(SSL_OP_NO_COMPRESSION) #endif /// File format types. enum file_format { /// ASN.1 file. asn1, /// PEM file. pem }; #if !defined(GENERATING_DOCUMENTATION) // The following types and constants are preserved for backward compatibility. // New programs should use the equivalents of the same names that are defined // in the asio::ssl namespace. typedef int verify_mode; ASIO_STATIC_CONSTANT(int, verify_none = SSL_VERIFY_NONE); ASIO_STATIC_CONSTANT(int, verify_peer = SSL_VERIFY_PEER); ASIO_STATIC_CONSTANT(int, verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT); ASIO_STATIC_CONSTANT(int, verify_client_once = SSL_VERIFY_CLIENT_ONCE); #endif /// Purpose of PEM password. enum password_purpose { /// The password is needed for reading/decryption. 
for_reading, /// The password is needed for writing/encryption. for_writing }; protected: /// Protected destructor to prevent deletion through this type. ~context_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_CONTEXT_BASE_HPP galera-4-26.4.25/asio/asio/ssl/impl/000755 000164 177776 00000000000 15107057160 020140 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ssl/impl/error.ipp000644 000164 177776 00000004060 15107057155 022007 0ustar00jenkinsnogroup000000 000000 // // ssl/impl/error.ipp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_ERROR_IPP #define ASIO_SSL_IMPL_ERROR_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { namespace detail { class ssl_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.ssl"; } std::string message(int value) const { const char* s = ::ERR_reason_error_string(value); return s ? 
s : "asio.ssl error"; } }; } // namespace detail const asio::error_category& get_ssl_category() { static detail::ssl_category instance; return instance; } } // namespace error namespace ssl { namespace error { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) && !defined(OPENSSL_IS_BORINGSSL) const asio::error_category& get_stream_category() { return asio::error::get_ssl_category(); } #else namespace detail { class stream_category : public asio::error_category { public: const char* name() const ASIO_ERROR_CATEGORY_NOEXCEPT { return "asio.ssl.stream"; } std::string message(int value) const { switch (value) { case stream_truncated: return "stream truncated"; case unspecified_system_error: return "unspecified system error"; case unexpected_result: return "unexpected result"; default: return "asio.ssl.stream error"; } } }; } // namespace detail const asio::error_category& get_stream_category() { static detail::stream_category instance; return instance; } #endif } // namespace error } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_ERROR_IPP galera-4-26.4.25/asio/asio/ssl/impl/context.hpp000644 000164 177776 00000003406 15107057155 022344 0ustar00jenkinsnogroup000000 000000 // // ssl/impl/context.hpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_HPP #define ASIO_SSL_IMPL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { template void context::set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } template ASIO_SYNC_OP_VOID context::set_verify_callback( VerifyCallback callback, asio::error_code& ec) { do_set_verify_callback( new detail::verify_callback(callback), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } template void context::set_password_callback(PasswordCallback callback) { asio::error_code ec; this->set_password_callback(callback, ec); asio::detail::throw_error(ec, "set_password_callback"); } template ASIO_SYNC_OP_VOID context::set_password_callback( PasswordCallback callback, asio::error_code& ec) { do_set_password_callback( new detail::password_callback(callback), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_HPP galera-4-26.4.25/asio/asio/ssl/impl/context.ipp000644 000164 177776 00000077705 15107057155 022362 0ustar00jenkinsnogroup000000 000000 // // ssl/impl/context.ipp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_CONTEXT_IPP #define ASIO_SSL_IMPL_CONTEXT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/ssl/context.hpp" #include "asio/ssl/error.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { struct context::bio_cleanup { BIO* p; ~bio_cleanup() { if (p) ::BIO_free(p); } }; struct context::x509_cleanup { X509* p; ~x509_cleanup() { if (p) ::X509_free(p); } }; struct context::evp_pkey_cleanup { EVP_PKEY* p; ~evp_pkey_cleanup() { if (p) ::EVP_PKEY_free(p); } }; struct context::rsa_cleanup { RSA* p; ~rsa_cleanup() { if (p) ::RSA_free(p); } }; struct context::dh_cleanup { DH* p; ~dh_cleanup() { if (p) ::DH_free(p); } }; context::context(context::method m) : handle_(0) { ::ERR_clear_error(); switch (m) { // SSL v2. #if (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) case context::sslv2: case context::sslv2_client: case context::sslv2_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) case context::sslv2: handle_ = ::SSL_CTX_new(::SSLv2_method()); break; case context::sslv2_client: handle_ = ::SSL_CTX_new(::SSLv2_client_method()); break; case context::sslv2_server: handle_ = ::SSL_CTX_new(::SSLv2_server_method()); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) || defined(OPENSSL_NO_SSL2) // SSL v3. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::sslv3: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, SSL3_VERSION); SSL_CTX_set_max_proto_version(handle_, SSL3_VERSION); } break; #elif defined(OPENSSL_NO_SSL3) case context::sslv3: case context::sslv3_client: case context::sslv3_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #else // defined(OPENSSL_NO_SSL3) case context::sslv3: handle_ = ::SSL_CTX_new(::SSLv3_method()); break; case context::sslv3_client: handle_ = ::SSL_CTX_new(::SSLv3_client_method()); break; case context::sslv3_server: handle_ = ::SSL_CTX_new(::SSLv3_server_method()); break; #endif // defined(OPENSSL_NO_SSL3) // TLS v1.0. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv1: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_VERSION); } break; #elif defined(SSL_TXT_TLSV1) case context::tlsv1: handle_ = ::SSL_CTX_new(::TLSv1_method()); break; case context::tlsv1_client: handle_ = ::SSL_CTX_new(::TLSv1_client_method()); break; case context::tlsv1_server: handle_ = ::SSL_CTX_new(::TLSv1_server_method()); break; #else // defined(SSL_TXT_TLSV1) case context::tlsv1: case context::tlsv1_client: case context::tlsv1_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1) // TLS v1.1. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_1_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_1_VERSION); } break; #elif defined(SSL_TXT_TLSV1_1) case context::tlsv11: handle_ = ::SSL_CTX_new(::TLSv1_1_method()); break; case context::tlsv11_client: handle_ = ::SSL_CTX_new(::TLSv1_1_client_method()); break; case context::tlsv11_server: handle_ = ::SSL_CTX_new(::TLSv1_1_server_method()); break; #else // defined(SSL_TXT_TLSV1_1) case context::tlsv11: case context::tlsv11_client: case context::tlsv11_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_1) // TLS v1.2. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_2_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_2_VERSION); } break; #elif defined(SSL_TXT_TLSV1_2) case context::tlsv12: handle_ = ::SSL_CTX_new(::TLSv1_2_method()); break; case context::tlsv12_client: handle_ = ::SSL_CTX_new(::TLSv1_2_client_method()); break; case context::tlsv12_server: handle_ = ::SSL_CTX_new(::TLSv1_2_server_method()); break; #else // defined(SSL_TXT_TLSV1_2) case context::tlsv12: case context::tlsv12_client: case context::tlsv12_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // defined(SSL_TXT_TLSV1_2) // TLS v1.3. 
#if (OPENSSL_VERSION_NUMBER >= 0x10101000L) \ && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv13: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; case context::tlsv13_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; case context::tlsv13_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) { SSL_CTX_set_min_proto_version(handle_, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(handle_, TLS1_3_VERSION); } break; #else // (OPENSSL_VERSION_NUMBER >= 0x10101000L) // && !defined(LIBRESSL_VERSION_NUMBER) case context::tlsv13: case context::tlsv13_client: case context::tlsv13_server: asio::detail::throw_error( asio::error::invalid_argument, "context"); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10101000L) // && !defined(LIBRESSL_VERSION_NUMBER) // Any supported SSL/TLS version. case context::sslv23: handle_ = ::SSL_CTX_new(::SSLv23_method()); break; case context::sslv23_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); break; case context::sslv23_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); break; // Any supported TLS version. 
#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(LIBRESSL_VERSION_NUMBER) case context::tls: handle_ = ::SSL_CTX_new(::TLS_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; case context::tls_client: handle_ = ::SSL_CTX_new(::TLS_client_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; case context::tls_server: handle_ = ::SSL_CTX_new(::TLS_server_method()); if (handle_) SSL_CTX_set_min_proto_version(handle_, TLS1_VERSION); break; #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) case context::tls: handle_ = ::SSL_CTX_new(::SSLv23_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; case context::tls_client: handle_ = ::SSL_CTX_new(::SSLv23_client_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; case context::tls_server: handle_ = ::SSL_CTX_new(::SSLv23_server_method()); if (handle_) SSL_CTX_set_options(handle_, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); break; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) default: handle_ = ::SSL_CTX_new(0); break; } if (handle_ == 0) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "context"); } set_options(no_compression); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::context(context&& other) { handle_ = other.handle_; other.handle_ = 0; } context& context::operator=(context&& other) { context tmp(ASIO_MOVE_CAST(context)(*this)); handle_ = other.handle_; other.handle_ = 0; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) context::~context() { if (handle_) { #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* cb_userdata = 
handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) if (cb_userdata) { detail::password_callback_base* callback = static_cast( cb_userdata); delete callback; #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) ::SSL_CTX_set_default_passwd_cb_userdata(handle_, 0); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) handle_->default_passwd_callback_userdata = 0; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) } if (SSL_CTX_get_app_data(handle_)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle_)); delete callback; SSL_CTX_set_app_data(handle_, 0); } ::SSL_CTX_free(handle_); } } context::native_handle_type context::native_handle() { return handle_; } void context::clear_options(context::options o) { asio::error_code ec; clear_options(o, ec); asio::detail::throw_error(ec, "clear_options"); } ASIO_SYNC_OP_VOID context::clear_options( context::options o, asio::error_code& ec) { #if (OPENSSL_VERSION_NUMBER >= 0x009080DFL) \ && (OPENSSL_VERSION_NUMBER != 0x00909000L) # if !defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { # if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = SSL_COMP_get_compression_methods(); # endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } # endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_clear_options(handle_, o); ec = asio::error_code(); #else // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) (void)o; ec = asio::error::operation_not_supported; #endif // (OPENSSL_VERSION_NUMBER >= 0x009080DFL) // && (OPENSSL_VERSION_NUMBER != 0x00909000L) ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_options(context::options o) { asio::error_code ec; set_options(o, ec); asio::detail::throw_error(ec, "set_options"); } ASIO_SYNC_OP_VOID context::set_options( context::options o, asio::error_code& ec) { #if 
!defined(SSL_OP_NO_COMPRESSION) if ((o & context::no_compression) != 0) { #if (OPENSSL_VERSION_NUMBER >= 0x00908000L) handle_->comp_methods = asio::ssl::detail::openssl_init<>::get_null_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x00908000L) o ^= context::no_compression; } #endif // !defined(SSL_OP_NO_COMPRESSION) ::SSL_CTX_set_options(handle_, o); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } ASIO_SYNC_OP_VOID context::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_CTX_set_verify(handle_, v, ::SSL_CTX_get_verify_callback(handle_)); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } ASIO_SYNC_OP_VOID context::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_CTX_set_verify_depth(handle_, depth); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::load_verify_file(const std::string& filename) { asio::error_code ec; load_verify_file(filename, ec); asio::detail::throw_error(ec, "load_verify_file"); } ASIO_SYNC_OP_VOID context::load_verify_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, filename.c_str(), 0) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::add_certificate_authority(const const_buffer& ca) { asio::error_code ec; add_certificate_authority(ca, ec); asio::detail::throw_error(ec, "add_certificate_authority"); } ASIO_SYNC_OP_VOID context::add_certificate_authority( const const_buffer& ca, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(ca) }; 
if (bio.p) { if (X509_STORE* store = ::SSL_CTX_get_cert_store(handle_)) { for (;;) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (!cert.p) break; if (::X509_STORE_add_cert(store, cert.p) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } } } } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::set_default_verify_paths() { asio::error_code ec; set_default_verify_paths(ec); asio::detail::throw_error(ec, "set_default_verify_paths"); } ASIO_SYNC_OP_VOID context::set_default_verify_paths( asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_set_default_verify_paths(handle_) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::add_verify_path(const std::string& path) { asio::error_code ec; add_verify_path(path, ec); asio::detail::throw_error(ec, "add_verify_path"); } ASIO_SYNC_OP_VOID context::add_verify_path( const std::string& path, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_load_verify_locations(handle_, 0, path.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate( const const_buffer& certificate, file_format format) { asio::error_code ec; use_certificate(certificate, format, ec); asio::detail::throw_error(ec, "use_certificate"); } ASIO_SYNC_OP_VOID context::use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec) { ::ERR_clear_error(); if (format == context_base::asn1) { if (::SSL_CTX_use_certificate_ASN1(handle_, static_cast(certificate.size()), static_cast(certificate.data())) == 1) { ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } else if (format == context_base::pem) 
{ bio_cleanup bio = { make_buffer_bio(certificate) }; if (bio.p) { x509_cleanup cert = { ::PEM_read_bio_X509(bio.p, 0, 0, 0) }; if (cert.p) { if (::SSL_CTX_use_certificate(handle_, cert.p) == 1) { ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } } } else { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate_file( const std::string& filename, file_format format) { asio::error_code ec; use_certificate_file(filename, format, ec); asio::detail::throw_error(ec, "use_certificate_file"); } ASIO_SYNC_OP_VOID context::use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } } ::ERR_clear_error(); if (::SSL_CTX_use_certificate_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate_chain(const const_buffer& chain) { asio::error_code ec; use_certificate_chain(chain, ec); asio::detail::throw_error(ec, "use_certificate_chain"); } ASIO_SYNC_OP_VOID context::use_certificate_chain( const const_buffer& chain, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(chain) }; if (bio.p) { #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = 
handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) x509_cleanup cert = { ::PEM_read_bio_X509_AUX(bio.p, 0, callback, cb_userdata) }; if (!cert.p) { ec = asio::error_code(ERR_R_PEM_LIB, asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } int result = ::SSL_CTX_use_certificate(handle_, cert.p); if (result == 0 || ::ERR_peek_error() != 0) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } #if ((OPENSSL_VERSION_NUMBER >= 0x10002000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) ::SSL_CTX_clear_chain_certs(handle_); #else if (handle_->extra_certs) { ::sk_X509_pop_free(handle_->extra_certs, X509_free); handle_->extra_certs = 0; } #endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L) while (X509* cacert = ::PEM_read_bio_X509(bio.p, 0, callback, cb_userdata)) { if (!::SSL_CTX_add_extra_chain_cert(handle_, cacert)) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } } result = ::ERR_peek_last_error(); if ((ERR_GET_LIB(result) == ERR_LIB_PEM) && (ERR_GET_REASON(result) == PEM_R_NO_START_LINE)) { ::ERR_clear_error(); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_certificate_chain_file(const std::string& filename) { asio::error_code ec; use_certificate_chain_file(filename, ec); asio::detail::throw_error(ec, "use_certificate_chain_file"); } ASIO_SYNC_OP_VOID context::use_certificate_chain_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); if (::SSL_CTX_use_certificate_chain_file(handle_, filename.c_str()) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); 
} ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_private_key(private_key, format, ec); asio::detail::throw_error(ec, "use_private_key"); } ASIO_SYNC_OP_VOID context::use_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { evp_pkey_cleanup evp_private_key = { 0 }; switch (format) { case context_base::asn1: evp_private_key.p = ::d2i_PrivateKey_bio(bio.p, 0); break; case context_base::pem: evp_private_key.p = ::PEM_read_bio_PrivateKey( bio.p, 0, callback, cb_userdata); break; default: { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } } if (evp_private_key.p) { if (::SSL_CTX_use_PrivateKey(handle_, evp_private_key.p) == 1) { ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_private_key_file"); } void context::use_rsa_private_key( const const_buffer& private_key, context::file_format format) { asio::error_code ec; use_rsa_private_key(private_key, format, ec); asio::detail::throw_error(ec, 
"use_rsa_private_key"); } ASIO_SYNC_OP_VOID context::use_rsa_private_key( const const_buffer& private_key, context::file_format format, asio::error_code& ec) { ::ERR_clear_error(); #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) pem_password_cb* callback = ::SSL_CTX_get_default_passwd_cb(handle_); void* cb_userdata = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) pem_password_cb* callback = handle_->default_passwd_callback; void* cb_userdata = handle_->default_passwd_callback_userdata; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) bio_cleanup bio = { make_buffer_bio(private_key) }; if (bio.p) { rsa_cleanup rsa_private_key = { 0 }; switch (format) { case context_base::asn1: rsa_private_key.p = ::d2i_RSAPrivateKey_bio(bio.p, 0); break; case context_base::pem: rsa_private_key.p = ::PEM_read_bio_RSAPrivateKey( bio.p, 0, callback, cb_userdata); break; default: { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } } if (rsa_private_key.p) { if (::SSL_CTX_use_RSAPrivateKey(handle_, rsa_private_key.p) == 1) { ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID context::use_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } } ::ERR_clear_error(); if (::SSL_CTX_use_PrivateKey_file(handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void 
context::use_rsa_private_key_file( const std::string& filename, context::file_format format) { asio::error_code ec; use_rsa_private_key_file(filename, format, ec); asio::detail::throw_error(ec, "use_rsa_private_key_file"); } ASIO_SYNC_OP_VOID context::use_rsa_private_key_file( const std::string& filename, context::file_format format, asio::error_code& ec) { int file_type; switch (format) { case context_base::asn1: file_type = SSL_FILETYPE_ASN1; break; case context_base::pem: file_type = SSL_FILETYPE_PEM; break; default: { ec = asio::error::invalid_argument; ASIO_SYNC_OP_VOID_RETURN(ec); } } ::ERR_clear_error(); if (::SSL_CTX_use_RSAPrivateKey_file( handle_, filename.c_str(), file_type) != 1) { ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_tmp_dh(const const_buffer& dh) { asio::error_code ec; use_tmp_dh(dh, ec); asio::detail::throw_error(ec, "use_tmp_dh"); } ASIO_SYNC_OP_VOID context::use_tmp_dh( const const_buffer& dh, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { make_buffer_bio(dh) }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } void context::use_tmp_dh_file(const std::string& filename) { asio::error_code ec; use_tmp_dh_file(filename, ec); asio::detail::throw_error(ec, "use_tmp_dh_file"); } ASIO_SYNC_OP_VOID context::use_tmp_dh_file( const std::string& filename, asio::error_code& ec) { ::ERR_clear_error(); bio_cleanup bio = { ::BIO_new_file(filename.c_str(), "r") }; if (bio.p) { return do_use_tmp_dh(bio.p, ec); } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID context::do_use_tmp_dh( BIO* bio, asio::error_code& ec) { ::ERR_clear_error(); dh_cleanup dh = { ::PEM_read_bio_DHparams(bio, 0, 
0, 0) }; if (dh.p) { if (::SSL_CTX_set_tmp_dh(handle_, dh.p) == 1) { ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } } ec = asio::error_code( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); ASIO_SYNC_OP_VOID_RETURN(ec); } ASIO_SYNC_OP_VOID context::do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec) { if (SSL_CTX_get_app_data(handle_)) { delete static_cast( SSL_CTX_get_app_data(handle_)); } SSL_CTX_set_app_data(handle_, callback); ::SSL_CTX_set_verify(handle_, ::SSL_CTX_get_verify_mode(handle_), &context::verify_callback_function); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } int context::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if (SSL_CTX* handle = ::SSL_get_SSL_CTX(ssl)) { if (SSL_CTX_get_app_data(handle)) { detail::verify_callback_base* callback = static_cast( SSL_CTX_get_app_data(handle)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 
1 : 0; } } } } return 0; } ASIO_SYNC_OP_VOID context::do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec) { #if ((OPENSSL_VERSION_NUMBER >= 0x10100000L) \ && !defined(LIBRESSL_VERSION_NUMBER)) \ || defined(ASIO_USE_WOLFSSL) void* old_callback = ::SSL_CTX_get_default_passwd_cb_userdata(handle_); ::SSL_CTX_set_default_passwd_cb_userdata(handle_, callback); #else // (OPENSSL_VERSION_NUMBER >= 0x10100000L) void* old_callback = handle_->default_passwd_callback_userdata; handle_->default_passwd_callback_userdata = callback; #endif // (OPENSSL_VERSION_NUMBER >= 0x10100000L) if (old_callback) delete static_cast( old_callback); SSL_CTX_set_default_passwd_cb(handle_, &context::password_callback_function); ec = asio::error_code(); ASIO_SYNC_OP_VOID_RETURN(ec); } int context::password_callback_function( char* buf, int size, int purpose, void* data) { using namespace std; // For strncat and strlen. if (data) { detail::password_callback_base* callback = static_cast(data); std::string passwd = callback->call(static_cast(size), purpose ? context_base::for_writing : context_base::for_reading); #if defined(ASIO_HAS_SECURE_RTL) strcpy_s(buf, size, passwd.c_str()); #else // defined(ASIO_HAS_SECURE_RTL) *buf = '\0'; if (size > 0) strncat(buf, passwd.c_str(), size - 1); #endif // defined(ASIO_HAS_SECURE_RTL) return static_cast(strlen(buf)); } return 0; } BIO* context::make_buffer_bio(const const_buffer& b) { return ::BIO_new_mem_buf( const_cast(b.data()), static_cast(b.size())); } } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_CONTEXT_IPP galera-4-26.4.25/asio/asio/ssl/impl/src.hpp000644 000164 177776 00000001370 15107057155 021445 0ustar00jenkinsnogroup000000 000000 // // impl/ssl/src.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_SRC_HPP #define ASIO_SSL_IMPL_SRC_HPP #define ASIO_SOURCE #include "asio/detail/config.hpp" #if defined(ASIO_HEADER_ONLY) # error Do not compile Asio library source with ASIO_HEADER_ONLY defined #endif #include "asio/ssl/impl/context.ipp" #include "asio/ssl/impl/error.ipp" #include "asio/ssl/detail/impl/engine.ipp" #include "asio/ssl/detail/impl/openssl_init.ipp" #include "asio/ssl/impl/rfc2818_verification.ipp" #endif // ASIO_SSL_IMPL_SRC_HPP galera-4-26.4.25/asio/asio/ssl/impl/rfc2818_verification.ipp000644 000164 177776 00000011053 15107057155 024515 0ustar00jenkinsnogroup000000 000000 // // ssl/impl/rfc2818_verification.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #define ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/ip/address.hpp" #include "asio/ssl/rfc2818_verification.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { bool rfc2818_verification::operator()( bool preverified, verify_context& ctx) const { using namespace std; // For memcmp. // Don't bother looking at certificates that have failed pre-verification. if (!preverified) return false; // We're only interested in checking the certificate at the end of the chain. int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle()); if (depth > 0) return true; // Try converting the host name to an address. 
If it is an address then we // need to look for an IP address in the certificate rather than a host name. asio::error_code ec; ip::address address = ip::make_address(host_, ec); bool is_address = !ec; X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle()); // Go through the alternate names in the certificate looking for matching DNS // or IP address entries. GENERAL_NAMES* gens = static_cast( X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0)); for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i) { GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i); if (gen->type == GEN_DNS && !is_address) { ASN1_IA5STRING* domain = gen->d.dNSName; if (domain->type == V_ASN1_IA5STRING && domain->data && domain->length) { const char* pattern = reinterpret_cast(domain->data); std::size_t pattern_length = domain->length; if (match_pattern(pattern, pattern_length, host_.c_str())) { GENERAL_NAMES_free(gens); return true; } } } else if (gen->type == GEN_IPADD && is_address) { ASN1_OCTET_STRING* ip_address = gen->d.iPAddress; if (ip_address->type == V_ASN1_OCTET_STRING && ip_address->data) { if (address.is_v4() && ip_address->length == 4) { ip::address_v4::bytes_type bytes = address.to_v4().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 4) == 0) { GENERAL_NAMES_free(gens); return true; } } else if (address.is_v6() && ip_address->length == 16) { ip::address_v6::bytes_type bytes = address.to_v6().to_bytes(); if (memcmp(bytes.data(), ip_address->data, 16) == 0) { GENERAL_NAMES_free(gens); return true; } } } } } GENERAL_NAMES_free(gens); // No match in the alternate names, so try the common names. We should only // use the "most specific" common name, which is the last one in the list. 
X509_NAME* name = X509_get_subject_name(cert); int i = -1; ASN1_STRING* common_name = 0; while ((i = X509_NAME_get_index_by_NID(name, NID_commonName, i)) >= 0) { X509_NAME_ENTRY* name_entry = X509_NAME_get_entry(name, i); common_name = X509_NAME_ENTRY_get_data(name_entry); } if (common_name && common_name->data && common_name->length) { const char* pattern = reinterpret_cast(common_name->data); std::size_t pattern_length = common_name->length; if (match_pattern(pattern, pattern_length, host_.c_str())) return true; } return false; } bool rfc2818_verification::match_pattern(const char* pattern, std::size_t pattern_length, const char* host) { using namespace std; // For tolower. const char* p = pattern; const char* p_end = p + pattern_length; const char* h = host; while (p != p_end && *h) { if (*p == '*') { ++p; while (*h && *h != '.') if (match_pattern(p, p_end - p, h++)) return true; } else if (tolower(*p) == tolower(*h)) { ++p; ++h; } else { return false; } } return p == p_end && !*h; } } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_IMPL_RFC2818_VERIFICATION_IPP galera-4-26.4.25/asio/asio/ssl/rfc2818_verification.hpp000644 000164 177776 00000005225 15107057155 023557 0ustar00jenkinsnogroup000000 000000 // // ssl/rfc2818_verification.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_RFC2818_VERIFICATION_HPP #define ASIO_SSL_RFC2818_VERIFICATION_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/ssl/detail/openssl_types.hpp" #include "asio/ssl/verify_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// Verifies a certificate against a hostname according to the rules described /// in RFC 2818. /** * @par Example * The following example shows how to synchronously open a secure connection to * a given host name: * @code * using asio::ip::tcp; * namespace ssl = asio::ssl; * typedef ssl::stream ssl_socket; * * // Create a context that uses the default paths for finding CA certificates. * ssl::context ctx(ssl::context::sslv23); * ctx.set_default_verify_paths(); * * // Open a socket and connect it to the remote host. * asio::io_context io_context; * ssl_socket sock(io_context, ctx); * tcp::resolver resolver(io_context); * tcp::resolver::query query("host.name", "https"); * asio::connect(sock.lowest_layer(), resolver.resolve(query)); * sock.lowest_layer().set_option(tcp::no_delay(true)); * * // Perform SSL handshake and verify the remote host's certificate. * sock.set_verify_mode(ssl::verify_peer); * sock.set_verify_callback(ssl::rfc2818_verification("host.name")); * sock.handshake(ssl_socket::client); * * // ... read and write as normal ... * @endcode */ class rfc2818_verification { public: /// The type of the function object's result. typedef bool result_type; /// Constructor. explicit rfc2818_verification(const std::string& host) : host_(host) { } /// Perform certificate verification. ASIO_DECL bool operator()(bool preverified, verify_context& ctx) const; private: // Helper function to check a host name against a pattern. 
ASIO_DECL static bool match_pattern(const char* pattern, std::size_t pattern_length, const char* host); // Helper function to check a host name against an IPv4 address // The host name to be checked. std::string host_; }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/rfc2818_verification.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_RFC2818_VERIFICATION_HPP galera-4-26.4.25/asio/asio/ssl/context.hpp000644 000164 177776 00000062063 15107057155 021407 0ustar00jenkinsnogroup000000 000000 // // ssl/context.hpp // ~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_CONTEXT_HPP #define ASIO_SSL_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/buffer.hpp" #include "asio/io_context.hpp" #include "asio/ssl/context_base.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/password_callback.hpp" #include "asio/ssl/detail/verify_callback.hpp" #include "asio/ssl/verify_mode.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { class context : public context_base, private noncopyable { public: /// The native handle type of the SSL context. typedef SSL_CTX* native_handle_type; /// Constructor. ASIO_DECL explicit context(method m); #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a context from another. /** * This constructor moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. 
* * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context(context&& other); /// Move-assign a context from another. /** * This assignment operator moves an SSL context from one object to another. * * @param other The other context object from which the move will occur. * * @note Following the move, the following operations only are valid for the * moved-from object: * @li Destruction. * @li As a target for move-assignment. */ ASIO_DECL context& operator=(context&& other); #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. ASIO_DECL ~context(); /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ ASIO_DECL native_handle_type native_handle(); /// Clear options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_clear_options. */ ASIO_DECL void clear_options(options o); /// Clear options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The specified options, if currently enabled on the * context, are cleared. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_clear_options. */ ASIO_DECL ASIO_SYNC_OP_VOID clear_options(options o, asio::error_code& ec); /// Set options on the context. 
/** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL void set_options(options o); /// Set options on the context. /** * This function may be used to configure the SSL options used by the context. * * @param o A bitmask of options. The available option values are defined in * the context_base class. The options are bitwise-ored with any existing * value for the options. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_options. */ ASIO_DECL ASIO_SYNC_OP_VOID set_options(options o, asio::error_code& ec); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL void set_verify_mode(verify_mode v); /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. */ ASIO_DECL ASIO_SYNC_OP_VOID set_verify_mode( verify_mode v, asio::error_code& ec); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify_depth. 
*/ ASIO_DECL void set_verify_depth(int depth); /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the context. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify_depth. */ ASIO_DECL ASIO_SYNC_OP_VOID set_verify_depth( int depth, asio::error_code& ec); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_verify. */ template void set_verify_callback(VerifyCallback callback); /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_verify. 
*/ template ASIO_SYNC_OP_VOID set_verify_callback(VerifyCallback callback, asio::error_code& ec); /// Load a certification authority file for performing verification. /** * This function is used to load one or more trusted certification authorities * from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void load_verify_file(const std::string& filename); /// Load a certification authority file for performing verification. /** * This function is used to load the certificates for one or more trusted * certification authorities from a file. * * @param filename The name of a file containing certification authority * certificates in PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL ASIO_SYNC_OP_VOID load_verify_file( const std::string& filename, asio::error_code& ec); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. */ ASIO_DECL void add_certificate_authority(const const_buffer& ca); /// Add certification authority for performing verification. /** * This function is used to add one trusted certification authority * from a memory buffer. * * @param ca The buffer containing the certification authority certificate. * The certificate must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_get_cert_store and @c X509_STORE_add_cert. 
*/ ASIO_DECL ASIO_SYNC_OP_VOID add_certificate_authority( const const_buffer& ca, asio::error_code& ec); /// Configures the context to use the default directories for finding /// certification authority certificates. /** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL void set_default_verify_paths(); /// Configures the context to use the default directories for finding /// certification authority certificates. /** * This function specifies that the context should use the default, * system-dependent directories for locating certification authority * certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_verify_paths. */ ASIO_DECL ASIO_SYNC_OP_VOID set_default_verify_paths( asio::error_code& ec); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". * * @param path The name of a directory containing the certificates. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL void add_verify_path(const std::string& path); /// Add a directory containing certificate authority files to be used for /// performing verification. /** * This function is used to specify the name of a directory containing * certification authority certificates. Each file in the directory must * contain a single certificate. The files must be named using the subject * name's hash and an extension of ".0". 
* * @param path The name of a directory containing the certificates. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_load_verify_locations. */ ASIO_DECL ASIO_SYNC_OP_VOID add_verify_path( const std::string& path, asio::error_code& ec); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL void use_certificate( const const_buffer& certificate, file_format format); /// Use a certificate from a memory buffer. /** * This function is used to load a certificate into the context from a buffer. * * @param certificate The buffer containing the certificate. * * @param format The certificate format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate or SSL_CTX_use_certificate_ASN1. */ ASIO_DECL ASIO_SYNC_OP_VOID use_certificate( const const_buffer& certificate, file_format format, asio::error_code& ec); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL void use_certificate_file( const std::string& filename, file_format format); /// Use a certificate from a file. /** * This function is used to load a certificate into the context from a file. * * @param filename The name of the file containing the certificate. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. 
* * @note Calls @c SSL_CTX_use_certificate_file. */ ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use a certificate chain from a memory buffer. /** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL void use_certificate_chain(const const_buffer& chain); /// Use a certificate chain from a memory buffer. /** * This function is used to load a certificate chain into the context from a * buffer. * * @param chain The buffer containing the certificate chain. The certificate * chain must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate and SSL_CTX_add_extra_chain_cert. */ ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_chain( const const_buffer& chain, asio::error_code& ec); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_certificate_chain_file. */ ASIO_DECL void use_certificate_chain_file(const std::string& filename); /// Use a certificate chain from a file. /** * This function is used to load a certificate chain into the context from a * file. * * @param filename The name of the file containing the certificate. The file * must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_certificate_chain_file. 
*/ ASIO_DECL ASIO_SYNC_OP_VOID use_certificate_chain_file( const std::string& filename, asio::error_code& ec); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL void use_private_key( const const_buffer& private_key, file_format format); /// Use a private key from a memory buffer. /** * This function is used to load a private key into the context from a buffer. * * @param private_key The buffer containing the private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey or SSL_CTX_use_PrivateKey_ASN1. */ ASIO_DECL ASIO_SYNC_OP_VOID use_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_PrivateKey_file. */ ASIO_DECL void use_private_key_file( const std::string& filename, file_format format); /// Use a private key from a file. /** * This function is used to load a private key into the context from a file. * * @param filename The name of the file containing the private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_PrivateKey_file. 
*/ ASIO_DECL ASIO_SYNC_OP_VOID use_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL void use_rsa_private_key( const const_buffer& private_key, file_format format); /// Use an RSA private key from a memory buffer. /** * This function is used to load an RSA private key into the context from a * buffer. * * @param private_key The buffer containing the RSA private key. * * @param format The private key format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey or SSL_CTX_use_RSAPrivateKey_ASN1. */ ASIO_DECL ASIO_SYNC_OP_VOID use_rsa_private_key( const const_buffer& private_key, file_format format, asio::error_code& ec); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. */ ASIO_DECL void use_rsa_private_key_file( const std::string& filename, file_format format); /// Use an RSA private key from a file. /** * This function is used to load an RSA private key into the context from a * file. * * @param filename The name of the file containing the RSA private key. * * @param format The file format (ASN.1 or PEM). * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_use_RSAPrivateKey_file. 
*/ ASIO_DECL ASIO_SYNC_OP_VOID use_rsa_private_key_file( const std::string& filename, file_format format, asio::error_code& ec); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh(const const_buffer& dh); /// Use the specified memory buffer to obtain the temporary Diffie-Hellman /// parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a buffer. * * @param dh The memory buffer containing the Diffie-Hellman parameters. The * buffer must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL ASIO_SYNC_OP_VOID use_tmp_dh( const const_buffer& dh, asio::error_code& ec); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_tmp_dh. */ ASIO_DECL void use_tmp_dh_file(const std::string& filename); /// Use the specified file to obtain the temporary Diffie-Hellman parameters. /** * This function is used to load Diffie-Hellman parameters into the context * from a file. * * @param filename The name of the file containing the Diffie-Hellman * parameters. The file must use the PEM format. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_tmp_dh. 
*/ ASIO_DECL ASIO_SYNC_OP_VOID use_tmp_dh_file( const std::string& filename, asio::error_code& ec); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template void set_password_callback(PasswordCallback callback); /// Set the password callback. /** * This function is used to specify a callback function to obtain password * information about an encrypted key in PEM format. * * @param callback The function object to be used for obtaining the password. * The function signature of the handler must be: * @code std::string password_callback( * std::size_t max_length, // The maximum size for a password. * password_purpose purpose // Whether password is for reading or writing. * ); @endcode * The return value of the callback is a string containing the password. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_CTX_set_default_passwd_cb. */ template ASIO_SYNC_OP_VOID set_password_callback(PasswordCallback callback, asio::error_code& ec); private: struct bio_cleanup; struct x509_cleanup; struct evp_pkey_cleanup; struct rsa_cleanup; struct dh_cleanup; // Helper function used to set a peer certificate verification callback. ASIO_DECL ASIO_SYNC_OP_VOID do_set_verify_callback( detail::verify_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants to verify a certificate. 
ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); // Helper function used to set a password callback. ASIO_DECL ASIO_SYNC_OP_VOID do_set_password_callback( detail::password_callback_base* callback, asio::error_code& ec); // Callback used when the SSL implementation wants a password. ASIO_DECL static int password_callback_function( char* buf, int size, int purpose, void* data); // Helper function to set the temporary Diffie-Hellman parameters from a BIO. ASIO_DECL ASIO_SYNC_OP_VOID do_use_tmp_dh( BIO* bio, asio::error_code& ec); // Helper function to make a BIO from a memory buffer. ASIO_DECL BIO* make_buffer_bio(const const_buffer& b); // The underlying native implementation. native_handle_type handle_; // Ensure openssl is initialised. asio::ssl::detail::openssl_init<> init_; }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/ssl/impl/context.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/context.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_CONTEXT_HPP galera-4-26.4.25/asio/asio/ssl/stream_base.hpp000644 000164 177776 00000002166 15107057155 022206 0ustar00jenkinsnogroup000000 000000 // // ssl/stream_base.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_BASE_HPP #define ASIO_SSL_STREAM_BASE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// The stream_base class is used as a base for the asio::ssl::stream /// class template so that we have a common place to define various enums. class stream_base { public: /// Different handshake types. 
enum handshake_type { /// Perform handshaking as a client. client, /// Perform handshaking as a server. server }; protected: /// Protected destructor to prevent deletion through this type. ~stream_base() { } }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_BASE_HPP galera-4-26.4.25/asio/asio/ssl/detail/000755 000164 177776 00000000000 15107057160 020441 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ssl/detail/stream_core.hpp000644 000164 177776 00000007430 15107057155 023465 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/stream_core.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_STREAM_CORE_HPP #define ASIO_SSL_DETAIL_STREAM_CORE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/deadline_timer.hpp" #else // defined(ASIO_HAS_BOOST_DATE_TIME) # include "asio/steady_timer.hpp" #endif // defined(ASIO_HAS_BOOST_DATE_TIME) #include "asio/ssl/detail/engine.hpp" #include "asio/buffer.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { struct stream_core { // According to the OpenSSL documentation, this is the buffer size that is // sufficient to hold the largest possible TLS record. 
enum { max_tls_record_size = 17 * 1024 }; template stream_core(SSL_CTX* context, const Executor& ex) : engine_(context), pending_read_(ex), pending_write_(ex), output_buffer_space_(max_tls_record_size), output_buffer_(asio::buffer(output_buffer_space_)), input_buffer_space_(max_tls_record_size), input_buffer_(asio::buffer(input_buffer_space_)) { pending_read_.expires_at(neg_infin()); pending_write_.expires_at(neg_infin()); } ~stream_core() { } // The SSL engine. engine engine_; #if defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::deadline_timer pending_read_; // Timer used for storing queued write operations. asio::deadline_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::deadline_timer::time_type neg_infin() { return boost::posix_time::neg_infin; } // Helper function for obtaining a time value that never fires. static asio::deadline_timer::time_type pos_infin() { return boost::posix_time::pos_infin; } // Helper function to get a timer's expiry time. static asio::deadline_timer::time_type expiry( const asio::deadline_timer& timer) { return timer.expires_at(); } #else // defined(ASIO_HAS_BOOST_DATE_TIME) // Timer used for storing queued read operations. asio::steady_timer pending_read_; // Timer used for storing queued write operations. asio::steady_timer pending_write_; // Helper function for obtaining a time value that always fires. static asio::steady_timer::time_point neg_infin() { return (asio::steady_timer::time_point::min)(); } // Helper function for obtaining a time value that never fires. static asio::steady_timer::time_point pos_infin() { return (asio::steady_timer::time_point::max)(); } // Helper function to get a timer's expiry time. static asio::steady_timer::time_point expiry( const asio::steady_timer& timer) { return timer.expiry(); } #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // Buffer space used to prepare output intended for the transport. 
std::vector output_buffer_space_; // A buffer that may be used to prepare output intended for the transport. const asio::mutable_buffer output_buffer_; // Buffer space used to read input intended for the engine. std::vector input_buffer_space_; // A buffer that may be used to read input intended for the engine. const asio::mutable_buffer input_buffer_; // The buffer pointing to the engine's unconsumed input. asio::const_buffer input_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_STREAM_CORE_HPP galera-4-26.4.25/asio/asio/ssl/detail/buffered_handshake_op.hpp000644 000164 177776 00000006112 15107057155 025444 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/buffered_handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { template class buffered_handshake_op { public: buffered_handshake_op(stream_base::handshake_type type, const ConstBufferSequence& buffers) : type_(type), buffers_(buffers), total_buffer_size_(asio::buffer_size(buffers_)) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { return this->process(eng, ec, bytes_transferred, asio::buffer_sequence_begin(buffers_), asio::buffer_sequence_end(buffers_)); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const 
{ handler(ec, bytes_transferred); } private: template engine::want process(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred, Iterator begin, Iterator end) const { Iterator iter = begin; std::size_t accumulated_size = 0; for (;;) { engine::want want = eng.handshake(type_, ec); if (want != engine::want_input_and_retry || bytes_transferred == total_buffer_size_) return want; // Find the next buffer piece to be fed to the engine. while (iter != end) { const_buffer buffer(*iter); // Skip over any buffers which have already been consumed by the engine. if (bytes_transferred >= accumulated_size + buffer.size()) { accumulated_size += buffer.size(); ++iter; continue; } // The current buffer may have been partially consumed by the engine on // a previous iteration. If so, adjust the buffer to point to the // unused portion. if (bytes_transferred > accumulated_size) buffer = buffer + (bytes_transferred - accumulated_size); // Pass the buffer to the engine, and update the bytes transferred to // reflect the total number of bytes consumed so far. bytes_transferred += buffer.size(); buffer = eng.put_input(buffer); bytes_transferred -= buffer.size(); break; } } } stream_base::handshake_type type_; ConstBufferSequence buffers_; std::size_t total_buffer_size_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_BUFFERED_HANDSHAKE_OP_HPP galera-4-26.4.25/asio/asio/ssl/detail/impl/000755 000164 177776 00000000000 15107057160 021402 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/asio/asio/ssl/detail/impl/engine.ipp000644 000164 177776 00000020141 15107057155 023363 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/impl/engine.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #define ASIO_SSL_DETAIL_IMPL_ENGINE_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/ssl/error.hpp" #include "asio/ssl/verify_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { engine::engine(SSL_CTX* context) : ssl_(::SSL_new(context)) { if (!ssl_) { asio::error_code ec( static_cast(::ERR_get_error()), asio::error::get_ssl_category()); asio::detail::throw_error(ec, "engine"); } #if (OPENSSL_VERSION_NUMBER < 0x10000000L) accept_mutex().init(); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) ::SSL_set_mode(ssl_, SSL_MODE_ENABLE_PARTIAL_WRITE); ::SSL_set_mode(ssl_, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); #if defined(SSL_MODE_RELEASE_BUFFERS) ::SSL_set_mode(ssl_, SSL_MODE_RELEASE_BUFFERS); #endif // defined(SSL_MODE_RELEASE_BUFFERS) ::BIO* int_bio = 0; ::BIO_new_bio_pair(&int_bio, 0, &ext_bio_, 0); ::SSL_set_bio(ssl_, int_bio, int_bio); } engine::~engine() { if (SSL_get_app_data(ssl_)) { delete static_cast(SSL_get_app_data(ssl_)); SSL_set_app_data(ssl_, 0); } ::BIO_free(ext_bio_); ::SSL_free(ssl_); } SSL* engine::native_handle() { return ssl_; } asio::error_code engine::set_verify_mode( verify_mode v, asio::error_code& ec) { ::SSL_set_verify(ssl_, v, ::SSL_get_verify_callback(ssl_)); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_depth( int depth, asio::error_code& ec) { ::SSL_set_verify_depth(ssl_, depth); ec = asio::error_code(); return ec; } asio::error_code engine::set_verify_callback( verify_callback_base* callback, asio::error_code& ec) { if (SSL_get_app_data(ssl_)) delete static_cast(SSL_get_app_data(ssl_)); SSL_set_app_data(ssl_, 
callback); ::SSL_set_verify(ssl_, ::SSL_get_verify_mode(ssl_), &engine::verify_callback_function); ec = asio::error_code(); return ec; } int engine::verify_callback_function(int preverified, X509_STORE_CTX* ctx) { if (ctx) { if (SSL* ssl = static_cast( ::X509_STORE_CTX_get_ex_data( ctx, ::SSL_get_ex_data_X509_STORE_CTX_idx()))) { if (SSL_get_app_data(ssl)) { verify_callback_base* callback = static_cast( SSL_get_app_data(ssl)); verify_context verify_ctx(ctx); return callback->call(preverified != 0, verify_ctx) ? 1 : 0; } } } return 0; } engine::want engine::handshake( stream_base::handshake_type type, asio::error_code& ec) { return perform((type == asio::ssl::stream_base::client) ? &engine::do_connect : &engine::do_accept, 0, 0, ec, 0); } engine::want engine::shutdown(asio::error_code& ec) { return perform(&engine::do_shutdown, 0, 0, ec, 0); } engine::want engine::write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (data.size() == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_write, const_cast(data.data()), data.size(), ec, &bytes_transferred); } engine::want engine::read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred) { if (data.size() == 0) { ec = asio::error_code(); return engine::want_nothing; } return perform(&engine::do_read, data.data(), data.size(), ec, &bytes_transferred); } asio::mutable_buffer engine::get_output( const asio::mutable_buffer& data) { int length = ::BIO_read(ext_bio_, data.data(), static_cast(data.size())); return asio::buffer(data, length > 0 ? static_cast(length) : 0); } asio::const_buffer engine::put_input( const asio::const_buffer& data) { int length = ::BIO_write(ext_bio_, data.data(), static_cast(data.size())); return asio::buffer(data + (length > 0 ? static_cast(length) : 0)); } const asio::error_code& engine::map_error_code( asio::error_code& ec) const { // We only want to map the error::eof code. 
if (ec != asio::error::eof) return ec; // If there's data yet to be read, it's an error. if (BIO_wpending(ext_bio_)) { ec = asio::ssl::error::stream_truncated; return ec; } // SSL v2 doesn't provide a protocol-level shutdown, so an eof on the // underlying transport is passed through. #if (OPENSSL_VERSION_NUMBER < 0x10100000L) if (SSL_version(ssl_) == SSL2_VERSION) return ec; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) // Otherwise, the peer should have negotiated a proper shutdown. if ((::SSL_get_shutdown(ssl_) & SSL_RECEIVED_SHUTDOWN) == 0) { ec = asio::ssl::error::stream_truncated; } return ec; } #if (OPENSSL_VERSION_NUMBER < 0x10000000L) asio::detail::static_mutex& engine::accept_mutex() { static asio::detail::static_mutex mutex = ASIO_STATIC_MUTEX_INIT; return mutex; } #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) engine::want engine::perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred) { std::size_t pending_output_before = ::BIO_ctrl_pending(ext_bio_); ::ERR_clear_error(); int result = (this->*op)(data, length); int ssl_error = ::SSL_get_error(ssl_, result); int sys_error = static_cast(::ERR_get_error()); std::size_t pending_output_after = ::BIO_ctrl_pending(ext_bio_); if (ssl_error == SSL_ERROR_SSL) { ec = asio::error_code(sys_error, asio::error::get_ssl_category()); return pending_output_after > pending_output_before ? want_output : want_nothing; } if (ssl_error == SSL_ERROR_SYSCALL) { if (sys_error == 0) { ec = asio::ssl::error::unspecified_system_error; } else { ec = asio::error_code(sys_error, asio::error::get_ssl_category()); } return pending_output_after > pending_output_before ? 
want_output : want_nothing; } if (result > 0 && bytes_transferred) *bytes_transferred = static_cast(result); if (ssl_error == SSL_ERROR_WANT_WRITE) { ec = asio::error_code(); return want_output_and_retry; } else if (pending_output_after > pending_output_before) { ec = asio::error_code(); return result > 0 ? want_output : want_output_and_retry; } else if (ssl_error == SSL_ERROR_WANT_READ) { ec = asio::error_code(); return want_input_and_retry; } else if (ssl_error == SSL_ERROR_ZERO_RETURN) { ec = asio::error::eof; return want_nothing; } else if (ssl_error == SSL_ERROR_NONE) { ec = asio::error_code(); return want_nothing; } else { ec = asio::ssl::error::unexpected_result; return want_nothing; } } int engine::do_accept(void*, std::size_t) { #if (OPENSSL_VERSION_NUMBER < 0x10000000L) asio::detail::static_mutex::scoped_lock lock(accept_mutex()); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) return ::SSL_accept(ssl_); } int engine::do_connect(void*, std::size_t) { return ::SSL_connect(ssl_); } int engine::do_shutdown(void*, std::size_t) { int result = ::SSL_shutdown(ssl_); if (result == 0) result = ::SSL_shutdown(ssl_); return result; } int engine::do_read(void* data, std::size_t length) { return ::SSL_read(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } int engine::do_write(void* data, std::size_t length) { return ::SSL_write(ssl_, data, length < INT_MAX ? static_cast(length) : INT_MAX); } } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_ENGINE_IPP galera-4-26.4.25/asio/asio/ssl/detail/impl/openssl_init.ipp000644 000164 177776 00000012261 15107057155 024630 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/impl/openssl_init.ipp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2005 Voipster / Indrek dot Juhani at voipster dot com // Copyright (c) 2005-2019 Christopher M. 
Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #define ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/assert.hpp" #include "asio/detail/mutex.hpp" #include "asio/detail/tss_ptr.hpp" #include "asio/ssl/detail/openssl_init.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base::do_init { public: do_init() { #if (OPENSSL_VERSION_NUMBER < 0x10100000L) ::SSL_library_init(); ::SSL_load_error_strings(); ::OpenSSL_add_all_algorithms(); mutexes_.resize(::CRYPTO_num_locks()); for (size_t i = 0; i < mutexes_.size(); ++i) mutexes_[i].reset(new asio::detail::mutex); ::CRYPTO_set_locking_callback(&do_init::openssl_locking_func); #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) ::CRYPTO_set_id_callback(&do_init::openssl_id_func); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) null_compression_methods_ = sk_SSL_COMP_new_null(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } ~do_init() { #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) sk_SSL_COMP_free(null_compression_methods_); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) ::CRYPTO_set_id_callback(0); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER < 0x10100000L) ::CRYPTO_set_locking_callback(0); ::ERR_free_strings(); ::EVP_cleanup(); 
::CRYPTO_cleanup_all_ex_data(); #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if (OPENSSL_VERSION_NUMBER < 0x10000000L) ::ERR_remove_state(0); #elif (OPENSSL_VERSION_NUMBER < 0x10100000L) ::ERR_remove_thread_state(NULL); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER >= 0x10002000L) \ && (OPENSSL_VERSION_NUMBER < 0x10100000L) \ && !defined(SSL_OP_NO_COMPRESSION) ::SSL_COMP_free_compression_methods(); #endif // (OPENSSL_VERSION_NUMBER >= 0x10002000L) // && (OPENSSL_VERSION_NUMBER < 0x10100000L) // && !defined(SSL_OP_NO_COMPRESSION) #if !defined(OPENSSL_IS_BORINGSSL) && !defined(ASIO_USE_WOLFSSL) ::CONF_modules_unload(1); #endif // !defined(OPENSSL_IS_BORINGSSL) && !defined(ASIO_USE_WOLFSSL) #if !defined(OPENSSL_NO_ENGINE) \ && (OPENSSL_VERSION_NUMBER < 0x10100000L) ::ENGINE_cleanup(); #endif // !defined(OPENSSL_NO_ENGINE) // && (OPENSSL_VERSION_NUMBER < 0x10100000L) } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* get_null_compression_methods() const { return null_compression_methods_; } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: #if (OPENSSL_VERSION_NUMBER < 0x10000000L) static unsigned long openssl_id_func() { #if defined(ASIO_WINDOWS) || defined(__CYGWIN__) return ::GetCurrentThreadId(); #else // defined(ASIO_WINDOWS) || defined(__CYGWIN__) void* id = &errno; ASIO_ASSERT(sizeof(unsigned long) >= sizeof(void*)); return reinterpret_cast(id); #endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__) } #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) #if (OPENSSL_VERSION_NUMBER < 0x10100000L) static void openssl_locking_func(int mode, int n, const char* /*file*/, int /*line*/) { if (mode & CRYPTO_LOCK) instance()->mutexes_[n]->lock(); else instance()->mutexes_[n]->unlock(); } // Mutexes to be used in locking callbacks. 
std::vector > mutexes_; #endif // (OPENSSL_VERSION_NUMBER < 0x10100000L) #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* null_compression_methods_; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; asio::detail::shared_ptr openssl_init_base::instance() { static asio::detail::shared_ptr init(new do_init); return init; } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) STACK_OF(SSL_COMP)* openssl_init_base::get_null_compression_methods() { return instance()->get_null_compression_methods(); } #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IMPL_OPENSSL_INIT_IPP galera-4-26.4.25/asio/asio/ssl/detail/openssl_types.hpp000644 000164 177776 00000001712 15107057155 024066 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/openssl_types.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #define ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/socket_types.hpp" #if defined(ASIO_USE_WOLFSSL) # include #endif // defined(ASIO_USE_WOLFSSL) #include #include #if !defined(OPENSSL_NO_ENGINE) # include #endif // !defined(OPENSSL_NO_ENGINE) #include #include #include #include #endif // ASIO_SSL_DETAIL_OPENSSL_TYPES_HPP galera-4-26.4.25/asio/asio/ssl/detail/password_callback.hpp000644 000164 177776 00000002627 15107057155 024643 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/password_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #define ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include "asio/ssl/context_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class password_callback_base { public: virtual ~password_callback_base() { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) = 0; }; template class password_callback : public password_callback_base { public: explicit password_callback(PasswordCallback callback) : callback_(callback) { } virtual std::string call(std::size_t size, context_base::password_purpose purpose) { return callback_(size, purpose); } private: PasswordCallback callback_; }; } // namespace detail } // namespace ssl } // namespace 
asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_PASSWORD_CALLBACK_HPP galera-4-26.4.25/asio/asio/ssl/detail/read_op.hpp000644 000164 177776 00000003017 15107057155 022570 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/read_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_READ_OP_HPP #define ASIO_SSL_DETAIL_READ_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { template class read_op { public: read_op(const MutableBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::mutable_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.read(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: MutableBufferSequence buffers_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_READ_OP_HPP galera-4-26.4.25/asio/asio/ssl/detail/openssl_init.hpp000644 000164 177776 00000005552 15107057155 023673 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/openssl_init.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #define ASIO_SSL_DETAIL_OPENSSL_INIT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/memory.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class openssl_init_base : private noncopyable { protected: // Class that performs the actual initialisation. class do_init; // Helper function to manage a do_init singleton. The static instance of the // openssl_init object ensures that this function is always called before // main, and therefore before any other threads can get started. The do_init // instance must be static in this function to ensure that it gets // initialised before any other global objects try to use it. ASIO_DECL static asio::detail::shared_ptr instance(); #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) // Get an empty stack of compression methods, to be used when disabling // compression. ASIO_DECL static STACK_OF(SSL_COMP)* get_null_compression_methods(); #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) }; template class openssl_init : private openssl_init_base { public: // Constructor. openssl_init() : ref_(instance()) { using namespace std; // For memmove. // Ensure openssl_init::instance_ is linked in. openssl_init* tmp = &instance_; memmove(&tmp, &tmp, sizeof(openssl_init*)); } // Destructor. 
~openssl_init() { } #if !defined(SSL_OP_NO_COMPRESSION) \ && (OPENSSL_VERSION_NUMBER >= 0x00908000L) using openssl_init_base::get_null_compression_methods; #endif // !defined(SSL_OP_NO_COMPRESSION) // && (OPENSSL_VERSION_NUMBER >= 0x00908000L) private: // Instance to force initialisation of openssl at global scope. static openssl_init instance_; // Reference to singleton do_init object to ensure that openssl does not get // cleaned up until the last user has finished with it. asio::detail::shared_ptr ref_; }; template openssl_init openssl_init::instance_; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/openssl_init.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_OPENSSL_INIT_HPP galera-4-26.4.25/asio/asio/ssl/detail/write_op.hpp000644 000164 177776 00000003013 15107057155 023003 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/write_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_WRITE_OP_HPP #define ASIO_SSL_DETAIL_WRITE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { template class write_op { public: write_op(const ConstBufferSequence& buffers) : buffers_(buffers) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { asio::const_buffer buffer = asio::detail::buffer_sequence_adapter::first(buffers_); return eng.write(buffer, ec, bytes_transferred); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t& bytes_transferred) const { handler(ec, bytes_transferred); } private: ConstBufferSequence buffers_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_WRITE_OP_HPP galera-4-26.4.25/asio/asio/ssl/detail/verify_callback.hpp000644 000164 177776 00000002437 15107057155 024304 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/verify_callback.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #define ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/verify_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class verify_callback_base { public: virtual ~verify_callback_base() { } virtual bool call(bool preverified, verify_context& ctx) = 0; }; template class verify_callback : public verify_callback_base { public: explicit verify_callback(VerifyCallback callback) : callback_(callback) { } virtual bool call(bool preverified, verify_context& ctx) { return callback_(preverified, ctx); } private: VerifyCallback callback_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_VERIFY_CALLBACK_HPP galera-4-26.4.25/asio/asio/ssl/detail/engine.hpp000644 000164 177776 00000012467 15107057155 022435 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/engine.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_ENGINE_HPP #define ASIO_SSL_DETAIL_ENGINE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/buffer.hpp" #include "asio/detail/static_mutex.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/ssl/detail/verify_callback.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/ssl/verify_mode.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class engine { public: enum want { // Returned by functions to indicate that the engine wants input. The input // buffer should be updated to point to the data. The engine then needs to // be called again to retry the operation. want_input_and_retry = -2, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. The engine then // needs to be called again to retry the operation. want_output_and_retry = -1, // Returned by functions to indicate that the engine doesn't need input or // output. want_nothing = 0, // Returned by functions to indicate that the engine wants to write output. // The output buffer points to the data to be written. After that the // operation is complete, and the engine does not need to be called again. want_output = 1 }; // Construct a new engine for the specified context. ASIO_DECL explicit engine(SSL_CTX* context); // Destructor. ASIO_DECL ~engine(); // Get the underlying implementation in the native type. ASIO_DECL SSL* native_handle(); // Set the peer verification mode. ASIO_DECL asio::error_code set_verify_mode( verify_mode v, asio::error_code& ec); // Set the peer verification depth. ASIO_DECL asio::error_code set_verify_depth( int depth, asio::error_code& ec); // Set a peer certificate verification callback. 
ASIO_DECL asio::error_code set_verify_callback( verify_callback_base* callback, asio::error_code& ec); // Perform an SSL handshake using either SSL_connect (client-side) or // SSL_accept (server-side). ASIO_DECL want handshake( stream_base::handshake_type type, asio::error_code& ec); // Perform a graceful shutdown of the SSL session. ASIO_DECL want shutdown(asio::error_code& ec); // Write bytes to the SSL session. ASIO_DECL want write(const asio::const_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Read bytes from the SSL session. ASIO_DECL want read(const asio::mutable_buffer& data, asio::error_code& ec, std::size_t& bytes_transferred); // Get output data to be written to the transport. ASIO_DECL asio::mutable_buffer get_output( const asio::mutable_buffer& data); // Put input data that was read from the transport. ASIO_DECL asio::const_buffer put_input( const asio::const_buffer& data); // Map an error::eof code returned by the underlying transport according to // the type and state of the SSL session. Returns a const reference to the // error code object, suitable for passing to a completion handler. ASIO_DECL const asio::error_code& map_error_code( asio::error_code& ec) const; private: // Disallow copying and assignment. engine(const engine&); engine& operator=(const engine&); // Callback used when the SSL implementation wants to verify a certificate. ASIO_DECL static int verify_callback_function( int preverified, X509_STORE_CTX* ctx); #if (OPENSSL_VERSION_NUMBER < 0x10000000L) // The SSL_accept function may not be thread safe. This mutex is used to // protect all calls to the SSL_accept function. ASIO_DECL static asio::detail::static_mutex& accept_mutex(); #endif // (OPENSSL_VERSION_NUMBER < 0x10000000L) // Perform one operation. Returns >= 0 on success or error, want_read if the // operation needs more input, or want_write if it needs to write some output // before the operation can complete. 
ASIO_DECL want perform(int (engine::* op)(void*, std::size_t), void* data, std::size_t length, asio::error_code& ec, std::size_t* bytes_transferred); // Adapt the SSL_accept function to the signature needed for perform(). ASIO_DECL int do_accept(void*, std::size_t); // Adapt the SSL_connect function to the signature needed for perform(). ASIO_DECL int do_connect(void*, std::size_t); // Adapt the SSL_shutdown function to the signature needed for perform(). ASIO_DECL int do_shutdown(void*, std::size_t); // Adapt the SSL_read function to the signature needed for perform(). ASIO_DECL int do_read(void* data, std::size_t length); // Adapt the SSL_write function to the signature needed for perform(). ASIO_DECL int do_write(void* data, std::size_t length); SSL* ssl_; BIO* ext_bio_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/detail/impl/engine.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_DETAIL_ENGINE_HPP galera-4-26.4.25/asio/asio/ssl/detail/io.hpp000644 000164 177776 00000026444 15107057155 021577 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/io.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_IO_HPP #define ASIO_SSL_DETAIL_IO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/ssl/detail/stream_core.hpp" #include "asio/write.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { template std::size_t io(Stream& next_layer, stream_core& core, const Operation& op, asio::error_code& ec) { asio::error_code io_ec; std::size_t bytes_transferred = 0; do switch (op(core.engine_, ec, bytes_transferred)) { case engine::want_input_and_retry: // If the input buffer is empty then we need to read some more data from // the underlying transport. if (core.input_.size() == 0) { core.input_ = asio::buffer(core.input_buffer_, next_layer.read_some(core.input_buffer_, io_ec)); if (!ec) ec = io_ec; } // Pass the new input data to the engine. core.input_ = core.engine_.put_input(core.input_); // Try the operation again. continue; case engine::want_output_and_retry: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), io_ec); if (!ec) ec = io_ec; // Try the operation again. continue; case engine::want_output: // Get output data from the engine and write it to the underlying // transport. asio::write(next_layer, core.engine_.get_output(core.output_buffer_), io_ec); if (!ec) ec = io_ec; // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; default: // Operation is complete. Return result to caller. core.engine_.map_error_code(ec); return bytes_transferred; } while (!ec); // Operation failed. Return result to caller. 
core.engine_.map_error_code(ec); return 0; } template class io_op { public: io_op(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) : next_layer_(next_layer), core_(core), op_(op), start_(0), want_(engine::want_nothing), bytes_transferred_(0), handler_(ASIO_MOVE_CAST(Handler)(handler)) { } #if defined(ASIO_HAS_MOVE) io_op(const io_op& other) : next_layer_(other.next_layer_), core_(other.core_), op_(other.op_), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(other.handler_) { } io_op(io_op&& other) : next_layer_(other.next_layer_), core_(other.core_), op_(ASIO_MOVE_CAST(Operation)(other.op_)), start_(other.start_), want_(other.want_), ec_(other.ec_), bytes_transferred_(other.bytes_transferred_), handler_(ASIO_MOVE_CAST(Handler)(other.handler_)) { } #endif // defined(ASIO_HAS_MOVE) void operator()(asio::error_code ec, std::size_t bytes_transferred = ~std::size_t(0), int start = 0) { switch (start_ = start) { case 1: // Called after at least one async operation. do { switch (want_ = op_(core_.engine_, ec_, bytes_transferred_)) { case engine::want_input_and_retry: // If the input buffer already has data in it we can pass it to the // engine and then retry the operation immediately. if (core_.input_.size() != 0) { core_.input_ = core_.engine_.put_input(core_.input_); continue; } // The engine wants more data to be read from input. However, we // cannot allow more than one read operation at a time on the // underlying transport. The pending_read_ timer's expiry is set to // pos_infin if a read is in progress, and neg_infin otherwise. if (core_.expiry(core_.pending_read_) == core_.neg_infin()) { // Prevent other read operations from being started. core_.pending_read_.expires_at(core_.pos_infin()); // Start reading some data from the underlying transport. 
next_layer_.async_read_some( asio::buffer(core_.input_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current read operation completes. core_.pending_read_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; case engine::want_output_and_retry: case engine::want_output: // The engine wants some data to be written to the output. However, we // cannot allow more than one write operation at a time on the // underlying transport. The pending_write_ timer's expiry is set to // pos_infin if a write is in progress, and neg_infin otherwise. if (core_.expiry(core_.pending_write_) == core_.neg_infin()) { // Prevent other write operations from being started. core_.pending_write_.expires_at(core_.pos_infin()); // Start writing all the data to the underlying transport. asio::async_write(next_layer_, core_.engine_.get_output(core_.output_buffer_), ASIO_MOVE_CAST(io_op)(*this)); } else { // Wait until the current write operation completes. core_.pending_write_.async_wait(ASIO_MOVE_CAST(io_op)(*this)); } // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; default: // The SSL operation is done and we can invoke the handler, but we // have to keep in mind that this function might be being called from // the async operation's initiating function. In this case we're not // allowed to call the handler directly. Instead, issue a zero-sized // read so the handler runs "as-if" posted using io_context::post(). if (start) { next_layer_.async_read_some( asio::buffer(core_.input_buffer_, 0), ASIO_MOVE_CAST(io_op)(*this)); // Yield control until asynchronous operation completes. Control // resumes at the "default:" label below. return; } else { // Continue on to run handler directly. 
break; } } default: if (bytes_transferred == ~std::size_t(0)) bytes_transferred = 0; // Timer cancellation, no data transferred. else if (!ec_) ec_ = ec; switch (want_) { case engine::want_input_and_retry: // Add received data to the engine's input. core_.input_ = asio::buffer( core_.input_buffer_, bytes_transferred); core_.input_ = core_.engine_.put_input(core_.input_); // Release any waiting read operations. core_.pending_read_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output_and_retry: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Try the operation again. continue; case engine::want_output: // Release any waiting write operations. core_.pending_write_.expires_at(core_.neg_infin()); // Fall through to call handler. default: // Pass the result to the handler. op_.call_handler(handler_, core_.engine_.map_error_code(ec_), ec_ ? 0 : bytes_transferred_); // Our work here is done. return; } } while (!ec_); // Operation failed. Pass the result to the handler. op_.call_handler(handler_, core_.engine_.map_error_code(ec_), 0); } } //private: Stream& next_layer_; stream_core& core_; Operation op_; int start_; engine::want want_; asio::error_code ec_; std::size_t bytes_transferred_; Handler handler_; }; template inline void* asio_handler_allocate(std::size_t size, io_op* this_handler) { return asio_handler_alloc_helpers::allocate( size, this_handler->handler_); } template inline void asio_handler_deallocate(void* pointer, std::size_t size, io_op* this_handler) { asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); } template inline bool asio_handler_is_continuation( io_op* this_handler) { return this_handler->start_ == 0 ? 
true : asio_handler_cont_helpers::is_continuation(this_handler->handler_); } template inline void asio_handler_invoke(Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void asio_handler_invoke(const Function& function, io_op* this_handler) { asio_handler_invoke_helpers::invoke( function, this_handler->handler_); } template inline void async_io(Stream& next_layer, stream_core& core, const Operation& op, Handler& handler) { io_op( next_layer, core, op, handler)( asio::error_code(), 0, 1); } } // namespace detail } // namespace ssl template struct associated_allocator< ssl::detail::io_op, Allocator> { typedef typename associated_allocator::type type; static type get(const ssl::detail::io_op& h, const Allocator& a = Allocator()) ASIO_NOEXCEPT { return associated_allocator::get(h.handler_, a); } }; template struct associated_executor< ssl::detail::io_op, Executor> { typedef typename associated_executor::type type; static type get(const ssl::detail::io_op& h, const Executor& ex = Executor()) ASIO_NOEXCEPT { return associated_executor::get(h.handler_, ex); } }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_IO_HPP galera-4-26.4.25/asio/asio/ssl/detail/shutdown_op.hpp000644 000164 177776 00000002744 15107057155 023536 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/shutdown_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #define ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class shutdown_op { public: engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.shutdown(ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { if (ec == asio::error::eof) { // The engine only generates an eof when the shutdown notification has // been received from the peer. This indicates that the shutdown has // completed successfully, and thus need not be passed on to the handler. handler(asio::error_code()); } else { handler(ec); } } }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_SHUTDOWN_OP_HPP galera-4-26.4.25/asio/asio/ssl/detail/handshake_op.hpp000644 000164 177776 00000002437 15107057155 023610 0ustar00jenkinsnogroup000000 000000 // // ssl/detail/handshake_op.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #define ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/engine.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { namespace detail { class handshake_op { public: handshake_op(stream_base::handshake_type type) : type_(type) { } engine::want operator()(engine& eng, asio::error_code& ec, std::size_t& bytes_transferred) const { bytes_transferred = 0; return eng.handshake(type_, ec); } template void call_handler(Handler& handler, const asio::error_code& ec, const std::size_t&) const { handler(ec); } private: stream_base::handshake_type type_; }; } // namespace detail } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_DETAIL_HANDSHAKE_OP_HPP galera-4-26.4.25/asio/asio/ssl/verify_mode.hpp000644 000164 177776 00000003215 15107057155 022225 0ustar00jenkinsnogroup000000 000000 // // ssl/verify_mode.hpp // ~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_MODE_HPP #define ASIO_SSL_VERIFY_MODE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// Bitmask type for peer verification. 
/** * Possible values are: * * @li @ref verify_none * @li @ref verify_peer * @li @ref verify_fail_if_no_peer_cert * @li @ref verify_client_once */ typedef int verify_mode; #if defined(GENERATING_DOCUMENTATION) /// No verification. const int verify_none = implementation_defined; /// Verify the peer. const int verify_peer = implementation_defined; /// Fail verification if the peer has no certificate. Ignored unless /// @ref verify_peer is set. const int verify_fail_if_no_peer_cert = implementation_defined; /// Do not request client certificate on renegotiation. Ignored unless /// @ref verify_peer is set. const int verify_client_once = implementation_defined; #else const int verify_none = SSL_VERIFY_NONE; const int verify_peer = SSL_VERIFY_PEER; const int verify_fail_if_no_peer_cert = SSL_VERIFY_FAIL_IF_NO_PEER_CERT; const int verify_client_once = SSL_VERIFY_CLIENT_ONCE; #endif } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_MODE_HPP galera-4-26.4.25/asio/asio/ssl/verify_context.hpp000644 000164 177776 00000003172 15107057155 022767 0ustar00jenkinsnogroup000000 000000 // // ssl/verify_context.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_VERIFY_CONTEXT_HPP #define ASIO_SSL_VERIFY_CONTEXT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// A simple wrapper around the X509_STORE_CTX type, used during verification of /// a peer certificate. /** * @note The verify_context does not own the underlying X509_STORE_CTX object. 
*/ class verify_context : private noncopyable { public: /// The native handle type of the verification context. typedef X509_STORE_CTX* native_handle_type; /// Constructor. explicit verify_context(native_handle_type handle) : handle_(handle) { } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. */ native_handle_type native_handle() { return handle_; } private: // The underlying native implementation. native_handle_type handle_; }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_VERIFY_CONTEXT_HPP galera-4-26.4.25/asio/asio/ssl/stream.hpp000644 000164 177776 00000064751 15107057155 021224 0ustar00jenkinsnogroup000000 000000 // // ssl/stream.hpp // ~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_STREAM_HPP #define ASIO_SSL_STREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/buffer_sequence_adapter.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/type_traits.hpp" #include "asio/ssl/context.hpp" #include "asio/ssl/detail/buffered_handshake_op.hpp" #include "asio/ssl/detail/handshake_op.hpp" #include "asio/ssl/detail/io.hpp" #include "asio/ssl/detail/read_op.hpp" #include "asio/ssl/detail/shutdown_op.hpp" #include "asio/ssl/detail/stream_core.hpp" #include "asio/ssl/detail/write_op.hpp" #include "asio/ssl/stream_base.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace ssl { /// Provides stream-oriented functionality using SSL. /** * The stream class template provides asynchronous and blocking stream-oriented * functionality using SSL. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. The application must also ensure that all * asynchronous operations are performed within the same implicit or explicit * strand. * * @par Example * To use the SSL stream template with an ip::tcp::socket, you would write: * @code * asio::io_context my_context; * asio::ssl::context ctx(asio::ssl::context::sslv23); * asio::ssl::stream sock(my_context, ctx); * @endcode * * @par Concepts: * AsyncReadStream, AsyncWriteStream, Stream, SyncReadStream, SyncWriteStream. */ template class stream : public stream_base, private noncopyable { public: /// The native handle type of the SSL stream. typedef SSL* native_handle_type; /// Structure for use with deprecated impl_type. struct impl_struct { SSL* ssl; }; /// The type of the next layer. 
typedef typename remove_reference::type next_layer_type; /// The type of the lowest layer. typedef typename next_layer_type::lowest_layer_type lowest_layer_type; /// The type of the executor associated with the object. typedef typename lowest_layer_type::executor_type executor_type; #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Construct a stream. /** * This constructor creates a stream and initialises the underlying stream * object. * * @param arg The argument to be passed to initialise the underlying stream. * * @param ctx The SSL context to be used for the stream. */ template stream(Arg&& arg, context& ctx) : next_layer_(ASIO_MOVE_CAST(Arg)(arg)), core_(ctx.native_handle(), next_layer_.lowest_layer().get_executor()) { } #else // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) template stream(Arg& arg, context& ctx) : next_layer_(arg), core_(ctx.native_handle(), next_layer_.lowest_layer().get_executor()) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destructor. /** * @note A @c stream object must not be destroyed while there are pending * asynchronous operations associated with it. */ ~stream() { } /// Get the executor associated with the object. /** * This function may be used to obtain the executor object that the stream * uses to dispatch handlers for asynchronous operations. * * @return A copy of the executor that stream will use to dispatch handlers. */ executor_type get_executor() ASIO_NOEXCEPT { return next_layer_.lowest_layer().get_executor(); } /// Get the underlying implementation in the native type. /** * This function may be used to obtain the underlying implementation of the * context. This is intended to allow access to context functionality that is * not otherwise provided. 
* * @par Example * The native_handle() function returns a pointer of type @c SSL* that is * suitable for passing to functions such as @c SSL_get_verify_result and * @c SSL_get_peer_certificate: * @code * asio::ssl::stream sock(my_context, ctx); * * // ... establish connection and perform handshake ... * * if (X509* cert = SSL_get_peer_certificate(sock.native_handle())) * { * if (SSL_get_verify_result(sock.native_handle()) == X509_V_OK) * { * // ... * } * } * @endcode */ native_handle_type native_handle() { return core_.engine_.native_handle(); } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const next_layer_type& next_layer() const { return next_layer_; } /// Get a reference to the next layer. /** * This function returns a reference to the next layer in a stack of stream * layers. * * @return A reference to the next layer in the stack of stream layers. * Ownership is not transferred to the caller. */ next_layer_type& next_layer() { return next_layer_; } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return next_layer_.lowest_layer(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * stream layers. * * @return A reference to the lowest layer in the stack of stream layers. * Ownership is not transferred to the caller. */ const lowest_layer_type& lowest_layer() const { return next_layer_.lowest_layer(); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. 
The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. */ void set_verify_mode(verify_mode v) { asio::error_code ec; set_verify_mode(v, ec); asio::detail::throw_error(ec, "set_verify_mode"); } /// Set the peer verification mode. /** * This function may be used to configure the peer verification mode used by * the stream. The new mode will override the mode inherited from the context. * * @param v A bitmask of peer verification modes. See @ref verify_mode for * available values. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ ASIO_SYNC_OP_VOID set_verify_mode( verify_mode v, asio::error_code& ec) { core_.engine_.set_verify_mode(v, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify_depth. */ void set_verify_depth(int depth) { asio::error_code ec; set_verify_depth(depth, ec); asio::detail::throw_error(ec, "set_verify_depth"); } /// Set the peer verification depth. /** * This function may be used to configure the maximum verification depth * allowed by the stream. * * @param depth Maximum depth for the certificate chain verification that * shall be allowed. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify_depth. */ ASIO_SYNC_OP_VOID set_verify_depth( int depth, asio::error_code& ec) { core_.engine_.set_verify_depth(depth, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Set the callback used to verify peer certificates. 
/** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @throws asio::system_error Thrown on failure. * * @note Calls @c SSL_set_verify. */ template void set_verify_callback(VerifyCallback callback) { asio::error_code ec; this->set_verify_callback(callback, ec); asio::detail::throw_error(ec, "set_verify_callback"); } /// Set the callback used to verify peer certificates. /** * This function is used to specify a callback function that will be called * by the implementation when it needs to verify a peer certificate. * * @param callback The function object to be used for verifying a certificate. * The function signature of the handler must be: * @code bool verify_callback( * bool preverified, // True if the certificate passed pre-verification. * verify_context& ctx // The peer certificate and other context. * ); @endcode * The return value of the callback is true if the certificate has passed * verification, false otherwise. * * @param ec Set to indicate what error occurred, if any. * * @note Calls @c SSL_set_verify. */ template ASIO_SYNC_OP_VOID set_verify_callback(VerifyCallback callback, asio::error_code& ec) { core_.engine_.set_verify_callback( new detail::verify_callback(callback), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. 
as a client or as * a server. * * @throws asio::system_error Thrown on failure. */ void handshake(handshake_type type) { asio::error_code ec; handshake(type, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID handshake(handshake_type type, asio::error_code& ec) { detail::io(next_layer_, core_, detail::handshake_op(type), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @throws asio::system_error Thrown on failure. */ template void handshake(handshake_type type, const ConstBufferSequence& buffers) { asio::error_code ec; handshake(type, buffers, ec); asio::detail::throw_error(ec, "handshake"); } /// Perform SSL handshaking. /** * This function is used to perform SSL handshaking on the stream. The * function call will block until handshaking is complete or an error occurs. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. * * @param ec Set to indicate what error occurred, if any. */ template ASIO_SYNC_OP_VOID handshake(handshake_type type, const ConstBufferSequence& buffers, asio::error_code& ec) { detail::io(next_layer_, core_, detail::buffered_handshake_op(type, buffers), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Start an asynchronous SSL handshake. 
/** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(HandshakeHandler, void (asio::error_code)) async_handshake(handshake_type type, ASIO_MOVE_ARG(HandshakeHandler) handler) { return async_initiate( initiate_async_handshake(), handler, this, type); } /// Start an asynchronous SSL handshake. /** * This function is used to asynchronously perform an SSL handshake on the * stream. This function call always returns immediately. * * @param type The type of handshaking to be performed, i.e. as a client or as * a server. * * @param buffers The buffered data to be reused for the handshake. Although * the buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Amount of buffers used in handshake. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(BufferedHandshakeHandler, void (asio::error_code, std::size_t)) async_handshake(handshake_type type, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(BufferedHandshakeHandler) handler) { return async_initiate( initiate_async_buffered_handshake(), handler, this, type, buffers); } /// Shut down SSL on the stream. 
/** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @throws asio::system_error Thrown on failure. */ void shutdown() { asio::error_code ec; shutdown(ec); asio::detail::throw_error(ec, "shutdown"); } /// Shut down SSL on the stream. /** * This function is used to shut down SSL on the stream. The function call * will block until SSL has been shut down or an error occurs. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID shutdown(asio::error_code& ec) { detail::io(next_layer_, core_, detail::shutdown_op(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Asynchronously shut down SSL on the stream. /** * This function is used to asynchronously shut down SSL on the stream. This * function call always returns immediately. * * @param handler The handler to be called when the handshake operation * completes. Copies will be made of the handler as required. The equivalent * function signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode */ template ASIO_INITFN_RESULT_TYPE(ShutdownHandler, void (asio::error_code)) async_shutdown(ASIO_MOVE_ARG(ShutdownHandler) handler) { return async_initiate( initiate_async_shutdown(), handler, this); } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. 
*/ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t n = write_some(buffers, ec); asio::detail::throw_error(ec, "write_some"); return n; } /// Write some data to the stream. /** * This function is used to write data on the stream. The function call will * block until one or more bytes of data has been written successfully, or * until an error occurs. * * @param buffers The data to be written to the stream. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that all * data is written before the blocking operation completes. */ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::write_op(buffers), ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write one or more bytes of data to * the stream. The function call always returns immediately. * * @param buffers The data to be written to the stream. Although the buffers * object may be copied as necessary, ownership of the underlying buffers is * retained by the caller, which must guarantee that they remain valid until * the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * * @note The async_write_some operation may not transmit all of the data to * the peer. Consider using the @ref async_write function if you need to * ensure that all data is written before the asynchronous operation * completes. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_write_some(), handler, this, buffers); } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t n = read_some(buffers, ec); asio::detail::throw_error(ec, "read_some"); return n; } /// Read some data from the stream. /** * This function is used to read data from the stream. The function call will * block until one or more bytes of data has been read successfully, or until * an error occurs. * * @param buffers The buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that the * requested amount of data is read before the blocking operation completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return detail::io(next_layer_, core_, detail::read_op(buffers), ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read one or more bytes of data from * the stream. 
The function call always returns immediately. * * @param buffers The buffers into which the data will be read. Although the * buffers object may be copied as necessary, ownership of the underlying * buffers is retained by the caller, which must guarantee that they remain * valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The equivalent function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * * @note The async_read_some operation may not read all of the requested * number of bytes. Consider using the @ref async_read function if you need to * ensure that the requested amount of data is read before the asynchronous * operation completes. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_read_some(), handler, this, buffers); } private: struct initiate_async_handshake { template void operator()(ASIO_MOVE_ARG(HandshakeHandler) handler, stream* self, handshake_type type) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a HandshakeHandler. 
ASIO_HANDSHAKE_HANDLER_CHECK(HandshakeHandler, handler) type_check; asio::detail::non_const_lvalue handler2(handler); detail::async_io(self->next_layer_, self->core_, detail::handshake_op(type), handler2.value); } }; struct initiate_async_buffered_handshake { template void operator()(ASIO_MOVE_ARG(BufferedHandshakeHandler) handler, stream* self, handshake_type type, const ConstBufferSequence& buffers) const { // If you get an error on the following line it means that your // handler does not meet the documented type requirements for a // BufferedHandshakeHandler. ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( BufferedHandshakeHandler, handler) type_check; asio::detail::non_const_lvalue< BufferedHandshakeHandler> handler2(handler); detail::async_io(self->next_layer_, self->core_, detail::buffered_handshake_op(type, buffers), handler2.value); } }; struct initiate_async_shutdown { template void operator()(ASIO_MOVE_ARG(ShutdownHandler) handler, stream* self) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ShutdownHandler. ASIO_HANDSHAKE_HANDLER_CHECK(ShutdownHandler, handler) type_check; asio::detail::non_const_lvalue handler2(handler); detail::async_io(self->next_layer_, self->core_, detail::shutdown_op(), handler2.value); } }; struct initiate_async_write_some { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, stream* self, const ConstBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. 
ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; asio::detail::non_const_lvalue handler2(handler); detail::async_io(self->next_layer_, self->core_, detail::write_op(buffers), handler2.value); } }; struct initiate_async_read_some { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, stream* self, const MutableBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; asio::detail::non_const_lvalue handler2(handler); detail::async_io(self->next_layer_, self->core_, detail::read_op(buffers), handler2.value); } }; Stream next_layer_; detail::stream_core core_; }; } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_SSL_STREAM_HPP galera-4-26.4.25/asio/asio/ssl/error.hpp000644 000164 177776 00000005511 15107057155 021047 0ustar00jenkinsnogroup000000 000000 // // ssl/error.hpp // ~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SSL_ERROR_HPP #define ASIO_SSL_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/ssl/detail/openssl_types.hpp" #include "asio/detail/push_options.hpp" namespace asio { namespace error { enum ssl_errors { // Error numbers are those produced by openssl. 
}; extern ASIO_DECL const asio::error_category& get_ssl_category(); static const asio::error_category& ssl_category ASIO_UNUSED_VARIABLE = asio::error::get_ssl_category(); } // namespace error namespace ssl { namespace error { enum stream_errors { #if defined(GENERATING_DOCUMENTATION) /// The underlying stream closed before the ssl stream gracefully shut down. stream_truncated, /// The underlying SSL library returned a system error without providing /// further information. unspecified_system_error, /// The underlying SSL library generated an unexpected result from a function /// call. unexpected_result #else // defined(GENERATING_DOCUMENTATION) # if (OPENSSL_VERSION_NUMBER < 0x10100000L) \ && !defined(OPENSSL_IS_BORINGSSL) \ && !defined(ASIO_USE_WOLFSSL) stream_truncated = ERR_PACK(ERR_LIB_SSL, 0, SSL_R_SHORT_READ), # else stream_truncated = 1, # endif unspecified_system_error = 2, unexpected_result = 3 #endif // defined(GENERATING_DOCUMENTATION) }; extern ASIO_DECL const asio::error_category& get_stream_category(); static const asio::error_category& stream_category ASIO_UNUSED_VARIABLE = asio::ssl::error::get_stream_category(); } // namespace error } // namespace ssl } // namespace asio #if defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace std { template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; } // namespace std #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace asio { namespace error { inline asio::error_code make_error_code(ssl_errors e) { return asio::error_code( static_cast(e), get_ssl_category()); } } // namespace error namespace ssl { namespace error { inline asio::error_code make_error_code(stream_errors e) { return asio::error_code( static_cast(e), get_stream_category()); } } // namespace error } // namespace ssl } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/ssl/impl/error.ipp" #endif // 
defined(ASIO_HEADER_ONLY) #endif // ASIO_SSL_ERROR_HPP galera-4-26.4.25/asio/asio/error.hpp000644 000164 177776 00000022311 15107057155 020243 0ustar00jenkinsnogroup000000 000000 // // error.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_ERROR_HPP #define ASIO_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/error_code.hpp" #include "asio/system_error.hpp" #if defined(ASIO_WINDOWS) \ || defined(__CYGWIN__) \ || defined(ASIO_WINDOWS_RUNTIME) # include #else # include # include #endif #if defined(GENERATING_DOCUMENTATION) /// INTERNAL ONLY. # define ASIO_NATIVE_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_SOCKET_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_NETDB_ERROR(e) implementation_defined /// INTERNAL ONLY. # define ASIO_GETADDRINFO_ERROR(e) implementation_defined /// INTERNAL ONLY. 
# define ASIO_WIN_OR_POSIX(e_win, e_posix) implementation_defined #elif defined(ASIO_WINDOWS_RUNTIME) # define ASIO_NATIVE_ERROR(e) __HRESULT_FROM_WIN32(e) # define ASIO_SOCKET_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_NETDB_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_GETADDRINFO_ERROR(e) __HRESULT_FROM_WIN32(WSA ## e) # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win #elif defined(ASIO_WINDOWS) || defined(__CYGWIN__) # define ASIO_NATIVE_ERROR(e) e # define ASIO_SOCKET_ERROR(e) WSA ## e # define ASIO_NETDB_ERROR(e) WSA ## e # define ASIO_GETADDRINFO_ERROR(e) WSA ## e # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_win #else # define ASIO_NATIVE_ERROR(e) e # define ASIO_SOCKET_ERROR(e) e # define ASIO_NETDB_ERROR(e) e # define ASIO_GETADDRINFO_ERROR(e) e # define ASIO_WIN_OR_POSIX(e_win, e_posix) e_posix #endif #include "asio/detail/push_options.hpp" namespace asio { namespace error { enum basic_errors { /// Permission denied. access_denied = ASIO_SOCKET_ERROR(EACCES), /// Address family not supported by protocol. address_family_not_supported = ASIO_SOCKET_ERROR(EAFNOSUPPORT), /// Address already in use. address_in_use = ASIO_SOCKET_ERROR(EADDRINUSE), /// Transport endpoint is already connected. already_connected = ASIO_SOCKET_ERROR(EISCONN), /// Operation already in progress. already_started = ASIO_SOCKET_ERROR(EALREADY), /// Broken pipe. broken_pipe = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_BROKEN_PIPE), ASIO_NATIVE_ERROR(EPIPE)), /// A connection has been aborted. connection_aborted = ASIO_SOCKET_ERROR(ECONNABORTED), /// Connection refused. connection_refused = ASIO_SOCKET_ERROR(ECONNREFUSED), /// Connection reset by peer. connection_reset = ASIO_SOCKET_ERROR(ECONNRESET), /// Bad file descriptor. bad_descriptor = ASIO_SOCKET_ERROR(EBADF), /// Bad address. fault = ASIO_SOCKET_ERROR(EFAULT), /// No route to host. host_unreachable = ASIO_SOCKET_ERROR(EHOSTUNREACH), /// Operation now in progress. 
in_progress = ASIO_SOCKET_ERROR(EINPROGRESS), /// Interrupted system call. interrupted = ASIO_SOCKET_ERROR(EINTR), /// Invalid argument. invalid_argument = ASIO_SOCKET_ERROR(EINVAL), /// Message too long. message_size = ASIO_SOCKET_ERROR(EMSGSIZE), /// The name was too long. name_too_long = ASIO_SOCKET_ERROR(ENAMETOOLONG), /// Network is down. network_down = ASIO_SOCKET_ERROR(ENETDOWN), /// Network dropped connection on reset. network_reset = ASIO_SOCKET_ERROR(ENETRESET), /// Network is unreachable. network_unreachable = ASIO_SOCKET_ERROR(ENETUNREACH), /// Too many open files. no_descriptors = ASIO_SOCKET_ERROR(EMFILE), /// No buffer space available. no_buffer_space = ASIO_SOCKET_ERROR(ENOBUFS), /// Cannot allocate memory. no_memory = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_OUTOFMEMORY), ASIO_NATIVE_ERROR(ENOMEM)), /// Operation not permitted. no_permission = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_ACCESS_DENIED), ASIO_NATIVE_ERROR(EPERM)), /// Protocol not available. no_protocol_option = ASIO_SOCKET_ERROR(ENOPROTOOPT), /// No such device. no_such_device = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_BAD_UNIT), ASIO_NATIVE_ERROR(ENODEV)), /// Transport endpoint is not connected. not_connected = ASIO_SOCKET_ERROR(ENOTCONN), /// Socket operation on non-socket. not_socket = ASIO_SOCKET_ERROR(ENOTSOCK), /// Operation cancelled. operation_aborted = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_OPERATION_ABORTED), ASIO_NATIVE_ERROR(ECANCELED)), /// Operation not supported. operation_not_supported = ASIO_SOCKET_ERROR(EOPNOTSUPP), /// Cannot send after transport endpoint shutdown. shut_down = ASIO_SOCKET_ERROR(ESHUTDOWN), /// Connection timed out. timed_out = ASIO_SOCKET_ERROR(ETIMEDOUT), /// Resource temporarily unavailable. try_again = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(ERROR_RETRY), ASIO_NATIVE_ERROR(EAGAIN)), /// The socket is marked non-blocking and the requested operation would block. 
would_block = ASIO_SOCKET_ERROR(EWOULDBLOCK) }; enum netdb_errors { /// Host not found (authoritative). host_not_found = ASIO_NETDB_ERROR(HOST_NOT_FOUND), /// Host not found (non-authoritative). host_not_found_try_again = ASIO_NETDB_ERROR(TRY_AGAIN), /// The query is valid but does not have associated address data. no_data = ASIO_NETDB_ERROR(NO_DATA), /// A non-recoverable error occurred. no_recovery = ASIO_NETDB_ERROR(NO_RECOVERY) }; enum addrinfo_errors { /// The service is not supported for the given socket type. service_not_found = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(WSATYPE_NOT_FOUND), ASIO_GETADDRINFO_ERROR(EAI_SERVICE)), /// The socket type is not supported. socket_type_not_supported = ASIO_WIN_OR_POSIX( ASIO_NATIVE_ERROR(WSAESOCKTNOSUPPORT), ASIO_GETADDRINFO_ERROR(EAI_SOCKTYPE)) }; enum misc_errors { /// Already open. already_open = 1, /// End of file or stream. eof, /// Element not found. not_found, /// The descriptor cannot fit into the select system call's fd_set. fd_set_failure }; inline const asio::error_category& get_system_category() { return asio::system_category(); } #if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) extern ASIO_DECL const asio::error_category& get_netdb_category(); extern ASIO_DECL const asio::error_category& get_addrinfo_category(); #else // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) inline const asio::error_category& get_netdb_category() { return get_system_category(); } inline const asio::error_category& get_addrinfo_category() { return get_system_category(); } #endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__) extern ASIO_DECL const asio::error_category& get_misc_category(); static const asio::error_category& system_category ASIO_UNUSED_VARIABLE = asio::error::get_system_category(); static const asio::error_category& netdb_category ASIO_UNUSED_VARIABLE = asio::error::get_netdb_category(); static const asio::error_category& addrinfo_category ASIO_UNUSED_VARIABLE = asio::error::get_addrinfo_category(); static const 
asio::error_category& misc_category ASIO_UNUSED_VARIABLE = asio::error::get_misc_category(); } // namespace error } // namespace asio #if defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace std { template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; template<> struct is_error_code_enum { static const bool value = true; }; } // namespace std #endif // defined(ASIO_HAS_STD_SYSTEM_ERROR) namespace asio { namespace error { inline asio::error_code make_error_code(basic_errors e) { return asio::error_code( static_cast(e), get_system_category()); } inline asio::error_code make_error_code(netdb_errors e) { return asio::error_code( static_cast(e), get_netdb_category()); } inline asio::error_code make_error_code(addrinfo_errors e) { return asio::error_code( static_cast(e), get_addrinfo_category()); } inline asio::error_code make_error_code(misc_errors e) { return asio::error_code( static_cast(e), get_misc_category()); } } // namespace error namespace stream_errc { // Simulates the proposed stream_errc scoped enum. using error::eof; using error::not_found; } // namespace stream_errc namespace socket_errc { // Simulates the proposed socket_errc scoped enum. using error::already_open; using error::not_found; } // namespace socket_errc namespace resolver_errc { // Simulates the proposed resolver_errc scoped enum. 
using error::host_not_found; const error::netdb_errors try_again = error::host_not_found_try_again; using error::service_not_found; } // namespace resolver_errc } // namespace asio #include "asio/detail/pop_options.hpp" #undef ASIO_NATIVE_ERROR #undef ASIO_SOCKET_ERROR #undef ASIO_NETDB_ERROR #undef ASIO_GETADDRINFO_ERROR #undef ASIO_WIN_OR_POSIX #if defined(ASIO_HEADER_ONLY) # include "asio/impl/error.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_ERROR_HPP galera-4-26.4.25/asio/asio/basic_socket_acceptor.hpp000644 000164 177776 00000235247 15107057155 023441 0ustar00jenkinsnogroup000000 000000 // // basic_socket_acceptor.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_ACCEPTOR_HPP #define ASIO_BASIC_SOCKET_ACCEPTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/basic_socket.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/socket_base.hpp" #if defined(ASIO_WINDOWS_RUNTIME) # include "asio/detail/null_socket_service.hpp" #elif defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_socket_service.hpp" #else # include "asio/detail/reactive_socket_service.hpp" #endif #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL) #define ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL // Forward declaration with defaulted 
arguments. template class basic_socket_acceptor; #endif // !defined(ASIO_BASIC_SOCKET_ACCEPTOR_FWD_DECL) /// Provides the ability to accept new connections. /** * The basic_socket_acceptor class template is used for accepting new socket * connections. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Example * Opening a socket acceptor with the SO_REUSEADDR option enabled: * @code * asio::ip::tcp::acceptor acceptor(my_context); * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), port); * acceptor.open(endpoint.protocol()); * acceptor.set_option(asio::ip::tcp::acceptor::reuse_address(true)); * acceptor.bind(endpoint); * acceptor.listen(); * @endcode */ template class basic_socket_acceptor : public socket_base { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of an acceptor. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #elif defined(ASIO_WINDOWS_RUNTIME) typedef typename detail::null_socket_service< Protocol>::native_handle_type native_handle_type; #elif defined(ASIO_HAS_IOCP) typedef typename detail::win_iocp_socket_service< Protocol>::native_handle_type native_handle_type; #else typedef typename detail::reactive_socket_service< Protocol>::native_handle_type native_handle_type; #endif /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// Construct an acceptor without opening it. /** * This constructor creates an acceptor without opening it to listen for new * connections. The open() function must be called before the acceptor can * accept new socket connections. * * @param ex The I/O executor that the acceptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * acceptor. 
*/ explicit basic_socket_acceptor(const executor_type& ex) : impl_(ex) { } /// Construct an acceptor without opening it. /** * This constructor creates an acceptor without opening it to listen for new * connections. The open() function must be called before the acceptor can * accept new socket connections. * * @param context An execution context which provides the I/O executor that * the acceptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the acceptor. */ template explicit basic_socket_acceptor(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Construct an open acceptor. /** * This constructor creates an acceptor and automatically opens it. * * @param ex The I/O executor that the acceptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * acceptor. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. */ basic_socket_acceptor(const executor_type& ex, const protocol_type& protocol) : impl_(ex) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct an open acceptor. /** * This constructor creates an acceptor and automatically opens it. * * @param context An execution context which provides the I/O executor that * the acceptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the acceptor. * * @param protocol An object specifying protocol parameters to be used. * * @throws asio::system_error Thrown on failure. 
*/ template basic_socket_acceptor(ExecutionContext& context, const protocol_type& protocol, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Construct an acceptor opened on the given endpoint. /** * This constructor creates an acceptor and automatically opens it to listen * for new connections on the specified endpoint. * * @param ex The I/O executor that the acceptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * acceptor. * * @param endpoint An endpoint on the local machine on which the acceptor * will listen for new connections. * * @param reuse_addr Whether the constructor should set the socket option * socket_base::reuse_address. * * @throws asio::system_error Thrown on failure. * * @note This constructor is equivalent to the following code: * @code * basic_socket_acceptor acceptor(my_context); * acceptor.open(endpoint.protocol()); * if (reuse_addr) * acceptor.set_option(socket_base::reuse_address(true)); * acceptor.bind(endpoint); * acceptor.listen(); * @endcode */ basic_socket_acceptor(const executor_type& ex, const endpoint_type& endpoint, bool reuse_addr = true) : impl_(ex) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); if (reuse_addr) { impl_.get_service().set_option(impl_.get_implementation(), socket_base::reuse_address(true), ec); asio::detail::throw_error(ec, "set_option"); } impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); impl_.get_service().listen(impl_.get_implementation(), socket_base::max_listen_connections, ec); asio::detail::throw_error(ec, "listen"); } /// Construct an acceptor opened on the given endpoint. 
/** * This constructor creates an acceptor and automatically opens it to listen * for new connections on the specified endpoint. * * @param context An execution context which provides the I/O executor that * the acceptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the acceptor. * * @param endpoint An endpoint on the local machine on which the acceptor * will listen for new connections. * * @param reuse_addr Whether the constructor should set the socket option * socket_base::reuse_address. * * @throws asio::system_error Thrown on failure. * * @note This constructor is equivalent to the following code: * @code * basic_socket_acceptor acceptor(my_context); * acceptor.open(endpoint.protocol()); * if (reuse_addr) * acceptor.set_option(socket_base::reuse_address(true)); * acceptor.bind(endpoint); * acceptor.listen(); * @endcode */ template basic_socket_acceptor(ExecutionContext& context, const endpoint_type& endpoint, bool reuse_addr = true, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; const protocol_type protocol = endpoint.protocol(); impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); if (reuse_addr) { impl_.get_service().set_option(impl_.get_implementation(), socket_base::reuse_address(true), ec); asio::detail::throw_error(ec, "set_option"); } impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); impl_.get_service().listen(impl_.get_implementation(), socket_base::max_listen_connections, ec); asio::detail::throw_error(ec, "listen"); } /// Construct a basic_socket_acceptor on an existing native acceptor. /** * This constructor creates an acceptor object to hold an existing native * acceptor. * * @param ex The I/O executor that the acceptor will use, by default, to * dispatch handlers for any asynchronous operations performed on the * acceptor. 
* * @param protocol An object specifying protocol parameters to be used. * * @param native_acceptor A native acceptor. * * @throws asio::system_error Thrown on failure. */ basic_socket_acceptor(const executor_type& ex, const protocol_type& protocol, const native_handle_type& native_acceptor) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_acceptor, ec); asio::detail::throw_error(ec, "assign"); } /// Construct a basic_socket_acceptor on an existing native acceptor. /** * This constructor creates an acceptor object to hold an existing native * acceptor. * * @param context An execution context which provides the I/O executor that * the acceptor will use, by default, to dispatch handlers for any * asynchronous operations performed on the acceptor. * * @param protocol An object specifying protocol parameters to be used. * * @param native_acceptor A native acceptor. * * @throws asio::system_error Thrown on failure. */ template basic_socket_acceptor(ExecutionContext& context, const protocol_type& protocol, const native_handle_type& native_acceptor, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_acceptor, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_socket_acceptor from another. /** * This constructor moves an acceptor from one object to another. * * @param other The other basic_socket_acceptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket_acceptor(const executor_type&) * constructor. */ basic_socket_acceptor(basic_socket_acceptor&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_socket_acceptor from another. 
/** * This assignment operator moves an acceptor from one object to another. * * @param other The other basic_socket_acceptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket_acceptor(const executor_type&) * constructor. */ basic_socket_acceptor& operator=(basic_socket_acceptor&& other) { impl_ = std::move(other.impl_); return *this; } // All socket acceptors have access to each other's implementations. template friend class basic_socket_acceptor; /// Move-construct a basic_socket_acceptor from an acceptor of another /// protocol type. /** * This constructor moves an acceptor from one object to another. * * @param other The other basic_socket_acceptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket_acceptor(const executor_type&) * constructor. */ template basic_socket_acceptor(basic_socket_acceptor&& other, typename enable_if< is_convertible::value && is_convertible::value >::type* = 0) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_socket_acceptor from an acceptor of another protocol /// type. /** * This assignment operator moves an acceptor from one object to another. * * @param other The other basic_socket_acceptor object from which the move * will occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_socket_acceptor(const executor_type&) * constructor. */ template typename enable_if< is_convertible::value && is_convertible::value, basic_socket_acceptor& >::type operator=(basic_socket_acceptor&& other) { basic_socket_acceptor tmp(std::move(other)); impl_ = std::move(tmp.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the acceptor. 
/** * This function destroys the acceptor, cancelling any outstanding * asynchronous operations associated with the acceptor as if by calling * @c cancel. */ ~basic_socket_acceptor() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Open the acceptor using the specified protocol. /** * This function opens the socket acceptor so that it will use the specified * protocol. * * @param protocol An object specifying which protocol is to be used. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * acceptor.open(asio::ip::tcp::v4()); * @endcode */ void open(const protocol_type& protocol = protocol_type()) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), protocol, ec); asio::detail::throw_error(ec, "open"); } /// Open the acceptor using the specified protocol. /** * This function opens the socket acceptor so that it will use the specified * protocol. * * @param protocol An object specifying which protocol is to be used. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * asio::error_code ec; * acceptor.open(asio::ip::tcp::v4(), ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID open(const protocol_type& protocol, asio::error_code& ec) { impl_.get_service().open(impl_.get_implementation(), protocol, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Assigns an existing native acceptor to the acceptor. /* * This function opens the acceptor to hold an existing native acceptor. * * @param protocol An object specifying which protocol is to be used. * * @param native_acceptor A native acceptor. * * @throws asio::system_error Thrown on failure. 
*/ void assign(const protocol_type& protocol, const native_handle_type& native_acceptor) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), protocol, native_acceptor, ec); asio::detail::throw_error(ec, "assign"); } /// Assigns an existing native acceptor to the acceptor. /* * This function opens the acceptor to hold an existing native acceptor. * * @param protocol An object specifying which protocol is to be used. * * @param native_acceptor A native acceptor. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const protocol_type& protocol, const native_handle_type& native_acceptor, asio::error_code& ec) { impl_.get_service().assign(impl_.get_implementation(), protocol, native_acceptor, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the acceptor is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Bind the acceptor to the given local endpoint. /** * This function binds the socket acceptor to the specified endpoint on the * local machine. * * @param endpoint An endpoint on the local machine to which the socket * acceptor will be bound. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345); * acceptor.open(endpoint.protocol()); * acceptor.bind(endpoint); * @endcode */ void bind(const endpoint_type& endpoint) { asio::error_code ec; impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); asio::detail::throw_error(ec, "bind"); } /// Bind the acceptor to the given local endpoint. /** * This function binds the socket acceptor to the specified endpoint on the * local machine. * * @param endpoint An endpoint on the local machine to which the socket * acceptor will be bound. * * @param ec Set to indicate what error occurred, if any. 
* * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * asio::ip::tcp::endpoint endpoint(asio::ip::tcp::v4(), 12345); * acceptor.open(endpoint.protocol()); * asio::error_code ec; * acceptor.bind(endpoint, ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID bind(const endpoint_type& endpoint, asio::error_code& ec) { impl_.get_service().bind(impl_.get_implementation(), endpoint, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Place the acceptor into the state where it will listen for new /// connections. /** * This function puts the socket acceptor into the state where it may accept * new connections. * * @param backlog The maximum length of the queue of pending connections. * * @throws asio::system_error Thrown on failure. */ void listen(int backlog = socket_base::max_listen_connections) { asio::error_code ec; impl_.get_service().listen(impl_.get_implementation(), backlog, ec); asio::detail::throw_error(ec, "listen"); } /// Place the acceptor into the state where it will listen for new /// connections. /** * This function puts the socket acceptor into the state where it may accept * new connections. * * @param backlog The maximum length of the queue of pending connections. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::error_code ec; * acceptor.listen(asio::socket_base::max_listen_connections, ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID listen(int backlog, asio::error_code& ec) { impl_.get_service().listen(impl_.get_implementation(), backlog, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Close the acceptor. /** * This function is used to close the acceptor. Any asynchronous accept * operations will be cancelled immediately. * * A subsequent call to open() is required before the acceptor can again be * used to again perform socket accept operations. * * @throws asio::system_error Thrown on failure. 
*/ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the acceptor. /** * This function is used to close the acceptor. Any asynchronous accept * operations will be cancelled immediately. * * A subsequent call to open() is required before the acceptor can again be * used to again perform socket accept operations. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::error_code ec; * acceptor.close(ec); * if (ec) * { * // An error occurred. * } * @endcode */ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Release ownership of the underlying native acceptor. /** * This function causes all outstanding asynchronous accept operations to * finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. Ownership of the * native acceptor is then transferred to the caller. * * @throws asio::system_error Thrown on failure. * * @note This function is unsupported on Windows versions prior to Windows * 8.1, and will fail with asio::error::operation_not_supported on * these platforms. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603) __declspec(deprecated("This function always fails with " "operation_not_supported when used on Windows versions " "prior to Windows 8.1.")) #endif native_handle_type release() { asio::error_code ec; native_handle_type s = impl_.get_service().release( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "release"); return s; } /// Release ownership of the underlying native acceptor. 
/** * This function causes all outstanding asynchronous accept operations to * finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. Ownership of the * native acceptor is then transferred to the caller. * * @param ec Set to indicate what error occurred, if any. * * @note This function is unsupported on Windows versions prior to Windows * 8.1, and will fail with asio::error::operation_not_supported on * these platforms. */ #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1400) \ && (!defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0603) __declspec(deprecated("This function always fails with " "operation_not_supported when used on Windows versions " "prior to Windows 8.1.")) #endif native_handle_type release(asio::error_code& ec) { return impl_.get_service().release(impl_.get_implementation(), ec); } /// Get the native acceptor representation. /** * This function may be used to obtain the underlying representation of the * acceptor. This is intended to allow access to native acceptor functionality * that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the acceptor. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the acceptor. /** * This function causes all outstanding asynchronous connect, send and receive * operations to finish immediately, and the handlers for cancelled operations * will be passed the asio::error::operation_aborted error. 
* * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Set an option on the acceptor. /** * This function is used to set an option on the acceptor. * * @param option The new option value to be set on the acceptor. * * @throws asio::system_error Thrown on failure. * * @sa SettableSocketOption @n * asio::socket_base::reuse_address * asio::socket_base::enable_connection_aborted * * @par Example * Setting the SOL_SOCKET/SO_REUSEADDR option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::acceptor::reuse_address option(true); * acceptor.set_option(option); * @endcode */ template void set_option(const SettableSocketOption& option) { asio::error_code ec; impl_.get_service().set_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } /// Set an option on the acceptor. /** * This function is used to set an option on the acceptor. * * @param option The new option value to be set on the acceptor. * * @param ec Set to indicate what error occurred, if any. * * @sa SettableSocketOption @n * asio::socket_base::reuse_address * asio::socket_base::enable_connection_aborted * * @par Example * Setting the SOL_SOCKET/SO_REUSEADDR option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::acceptor::reuse_address option(true); * asio::error_code ec; * acceptor.set_option(option, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template ASIO_SYNC_OP_VOID set_option(const SettableSocketOption& option, asio::error_code& ec) { impl_.get_service().set_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get an option from the acceptor. /** * This function is used to get the current value of an option on the * acceptor. * * @param option The option value to be obtained from the acceptor. 
* * @throws asio::system_error Thrown on failure. * * @sa GettableSocketOption @n * asio::socket_base::reuse_address * * @par Example * Getting the value of the SOL_SOCKET/SO_REUSEADDR option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::acceptor::reuse_address option; * acceptor.get_option(option); * bool is_set = option.get(); * @endcode */ template void get_option(GettableSocketOption& option) const { asio::error_code ec; impl_.get_service().get_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } /// Get an option from the acceptor. /** * This function is used to get the current value of an option on the * acceptor. * * @param option The option value to be obtained from the acceptor. * * @param ec Set to indicate what error occurred, if any. * * @sa GettableSocketOption @n * asio::socket_base::reuse_address * * @par Example * Getting the value of the SOL_SOCKET/SO_REUSEADDR option: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::acceptor::reuse_address option; * asio::error_code ec; * acceptor.get_option(option, ec); * if (ec) * { * // An error occurred. * } * bool is_set = option.get(); * @endcode */ template ASIO_SYNC_OP_VOID get_option(GettableSocketOption& option, asio::error_code& ec) const { impl_.get_service().get_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Perform an IO control command on the acceptor. /** * This function is used to execute an IO control command on the acceptor. * * @param command The IO control command to be performed on the acceptor. * * @throws asio::system_error Thrown on failure. * * @sa IoControlCommand @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* asio::ip::tcp::acceptor::non_blocking_io command(true); * socket.io_control(command); * @endcode */ template void io_control(IoControlCommand& command) { asio::error_code ec; impl_.get_service().io_control(impl_.get_implementation(), command, ec); asio::detail::throw_error(ec, "io_control"); } /// Perform an IO control command on the acceptor. /** * This function is used to execute an IO control command on the acceptor. * * @param command The IO control command to be performed on the acceptor. * * @param ec Set to indicate what error occurred, if any. * * @sa IoControlCommand @n * asio::socket_base::non_blocking_io * * @par Example * Getting the number of bytes ready to read: * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::acceptor::non_blocking_io command(true); * asio::error_code ec; * socket.io_control(command, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template ASIO_SYNC_OP_VOID io_control(IoControlCommand& command, asio::error_code& ec) { impl_.get_service().io_control(impl_.get_implementation(), command, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the acceptor. /** * @returns @c true if the acceptor's synchronous operations will fail with * asio::error::would_block if they are unable to perform the requested * operation immediately. If @c false, synchronous operations will block * until complete. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ bool non_blocking() const { return impl_.get_service().non_blocking(impl_.get_implementation()); } /// Sets the non-blocking mode of the acceptor. /** * @param mode If @c true, the acceptor's synchronous operations will fail * with asio::error::would_block if they are unable to perform the * requested operation immediately. If @c false, synchronous operations will * block until complete. 
* * @throws asio::system_error Thrown on failure. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ void non_blocking(bool mode) { asio::error_code ec; impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "non_blocking"); } /// Sets the non-blocking mode of the acceptor. /** * @param mode If @c true, the acceptor's synchronous operations will fail * with asio::error::would_block if they are unable to perform the * requested operation immediately. If @c false, synchronous operations will * block until complete. * * @param ec Set to indicate what error occurred, if any. * * @note The non-blocking mode has no effect on the behaviour of asynchronous * operations. Asynchronous operations will never fail with the error * asio::error::would_block. */ ASIO_SYNC_OP_VOID non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().non_blocking(impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Gets the non-blocking mode of the native acceptor implementation. /** * This function is used to retrieve the non-blocking mode of the underlying * native acceptor. This mode has no effect on the behaviour of the acceptor * object's synchronous operations. * * @returns @c true if the underlying acceptor is in non-blocking mode and * direct system calls may fail with asio::error::would_block (or the * equivalent system error). * * @note The current non-blocking mode is cached by the acceptor object. * Consequently, the return value may be incorrect if the non-blocking mode * was set directly on the native acceptor. */ bool native_non_blocking() const { return impl_.get_service().native_non_blocking(impl_.get_implementation()); } /// Sets the non-blocking mode of the native acceptor implementation. 
/** * This function is used to modify the non-blocking mode of the underlying * native acceptor. It has no effect on the behaviour of the acceptor object's * synchronous operations. * * @param mode If @c true, the underlying acceptor is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @throws asio::system_error Thrown on failure. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. */ void native_non_blocking(bool mode) { asio::error_code ec; impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); asio::detail::throw_error(ec, "native_non_blocking"); } /// Sets the non-blocking mode of the native acceptor implementation. /** * This function is used to modify the non-blocking mode of the underlying * native acceptor. It has no effect on the behaviour of the acceptor object's * synchronous operations. * * @param mode If @c true, the underlying acceptor is put into non-blocking * mode and direct system calls may fail with asio::error::would_block * (or the equivalent system error). * * @param ec Set to indicate what error occurred, if any. If the @c mode is * @c false, but the current value of @c non_blocking() is @c true, this * function fails with asio::error::invalid_argument, as the * combination does not make sense. */ ASIO_SYNC_OP_VOID native_non_blocking( bool mode, asio::error_code& ec) { impl_.get_service().native_non_blocking( impl_.get_implementation(), mode, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the local endpoint of the acceptor. /** * This function is used to obtain the locally bound endpoint of the acceptor. * * @returns An object that represents the local endpoint of the acceptor. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(); * @endcode */ endpoint_type local_endpoint() const { asio::error_code ec; endpoint_type ep = impl_.get_service().local_endpoint( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "local_endpoint"); return ep; } /// Get the local endpoint of the acceptor. /** * This function is used to obtain the locally bound endpoint of the acceptor. * * @param ec Set to indicate what error occurred, if any. * * @returns An object that represents the local endpoint of the acceptor. * Returns a default-constructed endpoint object if an error occurred and the * error handler did not throw an exception. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::error_code ec; * asio::ip::tcp::endpoint endpoint = acceptor.local_endpoint(ec); * if (ec) * { * // An error occurred. * } * @endcode */ endpoint_type local_endpoint(asio::error_code& ec) const { return impl_.get_service().local_endpoint(impl_.get_implementation(), ec); } /// Wait for the acceptor to become ready to read, ready to write, or to have /// pending error conditions. /** * This function is used to perform a blocking wait for an acceptor to enter * a ready to read, write or error condition state. * * @param w Specifies the desired acceptor state. * * @par Example * Waiting for an acceptor to become readable. * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * acceptor.wait(asio::ip::tcp::acceptor::wait_read); * @endcode */ void wait(wait_type w) { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), w, ec); asio::detail::throw_error(ec, "wait"); } /// Wait for the acceptor to become ready to read, ready to write, or to have /// pending error conditions. /** * This function is used to perform a blocking wait for an acceptor to enter * a ready to read, write or error condition state. * * @param w Specifies the desired acceptor state. 
* * @param ec Set to indicate what error occurred, if any. * * @par Example * Waiting for an acceptor to become readable. * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::error_code ec; * acceptor.wait(asio::ip::tcp::acceptor::wait_read, ec); * @endcode */ ASIO_SYNC_OP_VOID wait(wait_type w, asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), w, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Asynchronously wait for the acceptor to become ready to read, ready to /// write, or to have pending error conditions. /** * This function is used to perform an asynchronous wait for an acceptor to * enter a ready to read, write or error condition state. * * @param w Specifies the desired acceptor state. * * @param handler The handler to be called when the wait operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error // Result of operation * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void wait_handler(const asio::error_code& error) * { * if (!error) * { * // Wait succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * acceptor.async_wait( * asio::ip::tcp::acceptor::wait_read, * wait_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(wait_type w, ASIO_MOVE_ARG(WaitHandler) handler) { return async_initiate( initiate_async_wait(), handler, this, w); } #if !defined(ASIO_NO_EXTENSIONS) /// Accept a new connection. /** * This function is used to accept a new connection from a peer into the * given socket. 
The function call will block until a new connection has been * accepted successfully or an error occurs. * * @param peer The socket into which the new connection will be accepted. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(my_context); * acceptor.accept(socket); * @endcode */ template void accept(basic_socket& peer, typename enable_if< is_convertible::value >::type* = 0) { asio::error_code ec; impl_.get_service().accept(impl_.get_implementation(), peer, static_cast(0), ec); asio::detail::throw_error(ec, "accept"); } /// Accept a new connection. /** * This function is used to accept a new connection from a peer into the * given socket. The function call will block until a new connection has been * accepted successfully or an error occurs. * * @param peer The socket into which the new connection will be accepted. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(my_context); * asio::error_code ec; * acceptor.accept(socket, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template ASIO_SYNC_OP_VOID accept( basic_socket& peer, asio::error_code& ec, typename enable_if< is_convertible::value >::type* = 0) { impl_.get_service().accept(impl_.get_implementation(), peer, static_cast(0), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection into a * socket. The function call always returns immediately. * * @param peer The socket into which the new connection will be accepted. * Ownership of the peer object is retained by the caller, which must * guarantee that it is valid until the handler is called. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. 
The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void accept_handler(const asio::error_code& error) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(my_context); * acceptor.async_accept(socket, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(AcceptHandler, void (asio::error_code)) async_accept(basic_socket& peer, ASIO_MOVE_ARG(AcceptHandler) handler, typename enable_if< is_convertible::value >::type* = 0) { return async_initiate( initiate_async_accept(), handler, this, &peer, static_cast(0)); } /// Accept a new connection and obtain the endpoint of the peer /** * This function is used to accept a new connection from a peer into the * given socket, and additionally provide the endpoint of the remote peer. * The function call will block until a new connection has been accepted * successfully or an error occurs. * * @param peer The socket into which the new connection will be accepted. * * @param peer_endpoint An endpoint object which will receive the endpoint of * the remote peer. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* asio::ip::tcp::socket socket(my_context); * asio::ip::tcp::endpoint endpoint; * acceptor.accept(socket, endpoint); * @endcode */ template void accept(basic_socket& peer, endpoint_type& peer_endpoint) { asio::error_code ec; impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); asio::detail::throw_error(ec, "accept"); } /// Accept a new connection and obtain the endpoint of the peer /** * This function is used to accept a new connection from a peer into the * given socket, and additionally provide the endpoint of the remote peer. * The function call will block until a new connection has been accepted * successfully or an error occurs. * * @param peer The socket into which the new connection will be accepted. * * @param peer_endpoint An endpoint object which will receive the endpoint of * the remote peer. * * @param ec Set to indicate what error occurred, if any. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(my_context); * asio::ip::tcp::endpoint endpoint; * asio::error_code ec; * acceptor.accept(socket, endpoint, ec); * if (ec) * { * // An error occurred. * } * @endcode */ template ASIO_SYNC_OP_VOID accept(basic_socket& peer, endpoint_type& peer_endpoint, asio::error_code& ec) { impl_.get_service().accept( impl_.get_implementation(), peer, &peer_endpoint, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection into a * socket, and additionally obtain the endpoint of the remote peer. The * function call always returns immediately. * * @param peer The socket into which the new connection will be accepted. * Ownership of the peer object is retained by the caller, which must * guarantee that it is valid until the handler is called. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. 
Ownership of the peer_endpoint object is * retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(AcceptHandler, void (asio::error_code)) async_accept(basic_socket& peer, endpoint_type& peer_endpoint, ASIO_MOVE_ARG(AcceptHandler) handler) { return async_initiate( initiate_async_accept(), handler, this, &peer, &peer_endpoint); } #endif // !defined(ASIO_NO_EXTENSIONS) #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(acceptor.accept()); * @endcode */ typename Protocol::socket accept() { asio::error_code ec; typename Protocol::socket peer(impl_.get_executor()); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. 
The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(acceptor.accept(ec)); * if (ec) * { * // An error occurred. * } * @endcode */ typename Protocol::socket accept(asio::error_code& ec) { typename Protocol::socket peer(impl_.get_executor()); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); return peer; } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket peer // On success, the newly accepted socket. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* acceptor.async_accept(accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket)) async_accept(ASIO_MOVE_ARG(MoveAcceptHandler) handler) { return async_initiate( initiate_async_move_accept(), handler, this, impl_.get_executor(), static_cast(0), static_cast(0)); } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly * accepted socket. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(acceptor.accept()); * @endcode */ template typename Protocol::socket::template rebind_executor::other accept(const Executor1& ex, typename enable_if< is_executor::value >::type* = 0) { asio::error_code ec; typename Protocol::socket::template rebind_executor::other peer(ex); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param context The I/O execution context object to be used for the newly * accepted socket. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* asio::ip::tcp::socket socket(acceptor.accept()); * @endcode */ template typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other accept(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) { asio::error_code ec; typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other peer(context); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly accepted * socket. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(acceptor.accept(my_context2, ec)); * if (ec) * { * // An error occurred. * } * @endcode */ template typename Protocol::socket::template rebind_executor::other accept(const Executor1& ex, asio::error_code& ec, typename enable_if< is_executor::value >::type* = 0) { typename Protocol::socket::template rebind_executor::other peer(ex); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. 
* * @param context The I/O execution context object to be used for the newly * accepted socket. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::socket socket(acceptor.accept(my_context2, ec)); * if (ec) * { * // An error occurred. * } * @endcode */ template typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other accept(ExecutionContext& context, asio::error_code& ec, typename enable_if< is_convertible::value >::type* = 0) { typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other peer(context); impl_.get_service().accept(impl_.get_implementation(), peer, 0, ec); return peer; } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly accepted * socket. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket::template rebind_executor< * Executor1>::other peer // On success, the newly accepted socket. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * acceptor.async_accept(my_context2, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket::template rebind_executor< Executor1>::other)) async_accept(const Executor1& ex, ASIO_MOVE_ARG(MoveAcceptHandler) handler, typename enable_if< is_executor::value >::type* = 0) { typedef typename Protocol::socket::template rebind_executor< Executor1>::other other_socket_type; return async_initiate( initiate_async_move_accept(), handler, this, ex, static_cast(0), static_cast(0)); } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param context The I/O execution context object to be used for the newly * accepted socket. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket::template rebind_executor< * typename ExecutionContext::executor_type>::other peer * // On success, the newly accepted socket. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. 
* } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * acceptor.async_accept(my_context2, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other)) async_accept(ExecutionContext& context, ASIO_MOVE_ARG(MoveAcceptHandler) handler, typename enable_if< is_convertible::value >::type* = 0) { typedef typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other other_socket_type; return async_initiate( initiate_async_move_accept(), handler, this, context.get_executor(), static_cast(0), static_cast(0)); } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket(acceptor.accept(endpoint)); * @endcode */ typename Protocol::socket accept(endpoint_type& peer_endpoint) { asio::error_code ec; typename Protocol::socket peer(impl_.get_executor()); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. 
* * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket(acceptor.accept(endpoint, ec)); * if (ec) * { * // An error occurred. * } * @endcode */ typename Protocol::socket accept( endpoint_type& peer_endpoint, asio::error_code& ec) { typename Protocol::socket peer(impl_.get_executor()); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); return peer; } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. Ownership of the peer_endpoint object is * retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket peer // On success, the newly accepted socket. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * acceptor.async_accept(endpoint, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket)) async_accept(endpoint_type& peer_endpoint, ASIO_MOVE_ARG(MoveAcceptHandler) handler) { return async_initiate( initiate_async_move_accept(), handler, this, impl_.get_executor(), &peer_endpoint, static_cast(0)); } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly accepted * socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... 
* asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket( * acceptor.accept(my_context2, endpoint)); * @endcode */ template typename Protocol::socket::template rebind_executor::other accept(const Executor1& ex, endpoint_type& peer_endpoint, typename enable_if< is_executor::value >::type* = 0) { asio::error_code ec; typename Protocol::socket::template rebind_executor::other peer(ex); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param context The I/O execution context object to be used for the newly * accepted socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @returns A socket object representing the newly accepted connection. * * @throws asio::system_error Thrown on failure. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket( * acceptor.accept(my_context2, endpoint)); * @endcode */ template typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other accept(ExecutionContext& context, endpoint_type& peer_endpoint, typename enable_if< is_convertible::value >::type* = 0) { asio::error_code ec; typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other peer(context); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); asio::detail::throw_error(ec, "accept"); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. 
The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly accepted * socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. * * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket( * acceptor.accept(my_context2, endpoint, ec)); * if (ec) * { * // An error occurred. * } * @endcode */ template typename Protocol::socket::template rebind_executor::other accept(const executor_type& ex, endpoint_type& peer_endpoint, asio::error_code& ec, typename enable_if< is_executor::value >::type* = 0) { typename Protocol::socket::template rebind_executor::other peer(ex); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); return peer; } /// Accept a new connection. /** * This function is used to accept a new connection from a peer. The function * call will block until a new connection has been accepted successfully or * an error occurs. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param context The I/O execution context object to be used for the newly * accepted socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns On success, a socket object representing the newly accepted * connection. On error, a socket object where is_open() is false. 
* * @par Example * @code * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * asio::ip::tcp::socket socket( * acceptor.accept(my_context2, endpoint, ec)); * if (ec) * { * // An error occurred. * } * @endcode */ template typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other accept(ExecutionContext& context, endpoint_type& peer_endpoint, asio::error_code& ec, typename enable_if< is_convertible::value >::type* = 0) { typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other peer(context); impl_.get_service().accept(impl_.get_implementation(), peer, &peer_endpoint, ec); return peer; } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param ex The I/O executor object to be used for the newly accepted * socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. Ownership of the peer_endpoint object is * retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket::template rebind_executor< * Executor1>::other peer // On success, the newly accepted socket. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). 
* * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * acceptor.async_accept(my_context2, endpoint, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket::template rebind_executor< Executor1>::other)) async_accept(const Executor1& ex, endpoint_type& peer_endpoint, ASIO_MOVE_ARG(MoveAcceptHandler) handler, typename enable_if< is_executor::value >::type* = 0) { typedef typename Protocol::socket::template rebind_executor< Executor1>::other other_socket_type; return async_initiate( initiate_async_move_accept(), handler, this, ex, &peer_endpoint, static_cast(0)); } /// Start an asynchronous accept. /** * This function is used to asynchronously accept a new connection. The * function call always returns immediately. * * This overload requires that the Protocol template parameter satisfy the * AcceptableProtocol type requirements. * * @param context The I/O execution context object to be used for the newly * accepted socket. * * @param peer_endpoint An endpoint object into which the endpoint of the * remote peer will be written. Ownership of the peer_endpoint object is * retained by the caller, which must guarantee that it is valid until the * handler is called. * * @param handler The handler to be called when the accept operation * completes. Copies will be made of the handler as required. The function * signature of the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * typename Protocol::socket::template rebind_executor< * typename ExecutionContext::executor_type>::other peer * // On success, the newly accepted socket. 
* ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * @code * void accept_handler(const asio::error_code& error, * asio::ip::tcp::socket peer) * { * if (!error) * { * // Accept succeeded. * } * } * * ... * * asio::ip::tcp::acceptor acceptor(my_context); * ... * asio::ip::tcp::endpoint endpoint; * acceptor.async_accept(my_context2, endpoint, accept_handler); * @endcode */ template ASIO_INITFN_RESULT_TYPE(MoveAcceptHandler, void (asio::error_code, typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other)) async_accept(ExecutionContext& context, endpoint_type& peer_endpoint, ASIO_MOVE_ARG(MoveAcceptHandler) handler, typename enable_if< is_convertible::value >::type* = 0) { typedef typename Protocol::socket::template rebind_executor< typename ExecutionContext::executor_type>::other other_socket_type; return async_initiate( initiate_async_move_accept(), handler, this, context.get_executor(), &peer_endpoint, static_cast(0)); } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) private: // Disallow copying and assignment. basic_socket_acceptor(const basic_socket_acceptor&) ASIO_DELETED; basic_socket_acceptor& operator=( const basic_socket_acceptor&) ASIO_DELETED; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(WaitHandler) handler, basic_socket_acceptor* self, wait_type w) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), w, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_accept { template void operator()(ASIO_MOVE_ARG(AcceptHandler) handler, basic_socket_acceptor* self, basic_socket* peer, endpoint_type* peer_endpoint) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a AcceptHandler. ASIO_ACCEPT_HANDLER_CHECK(AcceptHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_accept( self->impl_.get_implementation(), *peer, peer_endpoint, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_move_accept { template void operator()(ASIO_MOVE_ARG(MoveAcceptHandler) handler, basic_socket_acceptor* self, const Executor1& peer_ex, endpoint_type* peer_endpoint, Socket*) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a MoveAcceptHandler. 
ASIO_MOVE_ACCEPT_HANDLER_CHECK( MoveAcceptHandler, handler, Socket) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_move_accept( self->impl_.get_implementation(), peer_ex, peer_endpoint, handler2.value, self->impl_.get_implementation_executor()); } }; #if defined(ASIO_WINDOWS_RUNTIME) detail::io_object_impl< detail::null_socket_service, Executor> impl_; #elif defined(ASIO_HAS_IOCP) detail::io_object_impl< detail::win_iocp_socket_service, Executor> impl_; #else detail::io_object_impl< detail::reactive_socket_service, Executor> impl_; #endif }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_SOCKET_ACCEPTOR_HPP galera-4-26.4.25/asio/asio/redirect_error.hpp000644 000164 177776 00000003400 15107057155 022122 0ustar00jenkinsnogroup000000 000000 // // redirect_error.hpp // ~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_REDIRECT_ERROR_HPP #define ASIO_REDIRECT_ERROR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error_code.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Completion token type used to specify that an error produced by an /// asynchronous operation is captured to an error_code variable. /** * The redirect_error_t class is used to indicate that any error_code produced * by an asynchronous operation is captured to a specified variable. */ template class redirect_error_t { public: /// Constructor. 
template redirect_error_t(ASIO_MOVE_ARG(T) completion_token, asio::error_code& ec) : token_(ASIO_MOVE_CAST(T)(completion_token)), ec_(ec) { } //private: CompletionToken token_; asio::error_code& ec_; }; /// Create a completion token to capture error_code values to a variable. template inline redirect_error_t::type> redirect_error( ASIO_MOVE_ARG(CompletionToken) completion_token, asio::error_code& ec) { return redirect_error_t::type>( ASIO_MOVE_CAST(CompletionToken)(completion_token), ec); } } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/redirect_error.hpp" #endif // ASIO_REDIRECT_ERROR_HPP galera-4-26.4.25/asio/asio/dispatch.hpp000644 000164 177776 00000007440 15107057155 020717 0ustar00jenkinsnogroup000000 000000 // // dispatch.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_DISPATCH_HPP #define ASIO_DISPATCH_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/async_result.hpp" #include "asio/detail/type_traits.hpp" #include "asio/execution_context.hpp" #include "asio/is_executor.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the object's associated * executor. The function object may be called from the current thread prior to * returning from dispatch(). Otherwise, it is queued for execution. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). 
* * @li Obtains the handler's associated executor object @c ex by performing * get_associated_executor(handler). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Performs ex.dispatch(std::move(handler), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( ASIO_MOVE_ARG(CompletionToken) token); /// Submits a completion token or function object for execution. /** * This function submits an object for execution using the specified executor. * The function object may be called from the current thread prior to returning * from dispatch(). Otherwise, it is queued for execution. * * This function has the following effects: * * @li Constructs a function object handler of type @c Handler, initialized * with handler(forward(token)). * * @li Constructs an object @c result of type async_result, * initializing the object as result(handler). * * @li Obtains the handler's associated executor object @c ex1 by performing * get_associated_executor(handler). * * @li Creates a work object @c w by performing make_work(ex1). * * @li Obtains the handler's associated allocator object @c alloc by performing * get_associated_allocator(handler). * * @li Constructs a function object @c f with a function call operator that * performs ex1.dispatch(std::move(handler), alloc) followed by * w.reset(). * * @li Performs Executor(ex).dispatch(std::move(f), alloc). * * @li Returns result.get(). */ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( const Executor& ex, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); /// Submits a completion token or function object for execution. /** * @returns dispatch(ctx.get_executor(), * forward(token)). 
*/ template ASIO_INITFN_RESULT_TYPE(CompletionToken, void()) dispatch( ExecutionContext& ctx, ASIO_MOVE_ARG(CompletionToken) token, typename enable_if::value>::type* = 0); } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/dispatch.hpp" #endif // ASIO_DISPATCH_HPP galera-4-26.4.25/asio/asio/high_resolution_timer.hpp000644 000164 177776 00000002433 15107057155 023517 0ustar00jenkinsnogroup000000 000000 // // high_resolution_timer.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HIGH_RESOLUTION_TIMER_HPP #define ASIO_HIGH_RESOLUTION_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #include "asio/basic_waitable_timer.hpp" #include "asio/detail/chrono.hpp" namespace asio { /// Typedef for a timer based on the high resolution clock. /** * This typedef uses the C++11 @c <chrono> standard library facility, if * available. Otherwise, it may use the Boost.Chrono library. To explicitly * utilise Boost.Chrono, use the basic_waitable_timer template directly: * @code * typedef basic_waitable_timer timer; * @endcode */ typedef basic_waitable_timer< chrono::high_resolution_clock> high_resolution_timer; } // namespace asio #endif // defined(ASIO_HAS_CHRONO) || defined(GENERATING_DOCUMENTATION) #endif // ASIO_HIGH_RESOLUTION_TIMER_HPP galera-4-26.4.25/asio/asio/write_at.hpp000644 000164 177776 00000066075 15107057155 020747 0ustar00jenkinsnogroup000000 000000 // // write_at.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_WRITE_AT_HPP #define ASIO_WRITE_AT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/async_result.hpp" #include "asio/detail/cstdint.hpp" #include "asio/error.hpp" #if !defined(ASIO_NO_EXTENSIONS) # include "asio/basic_streambuf_fwd.hpp" #endif // !defined(ASIO_NO_EXTENSIONS) #include "asio/detail/push_options.hpp" namespace asio { /** * @defgroup write_at asio::write_at * * @brief The @c write_at function is a composed operation that writes a * certain amount of data at a specified offset before returning. */ /*@{*/ /// Write all of the supplied data at the specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * device. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. 
* * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write_at(d, 42, asio::buffer(data, size)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. * * @note This overload is equivalent to calling: * @code asio::write_at( * d, offset, buffers, * asio::transfer_all()); @endcode */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers); /// Write all of the supplied data at the specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * device. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write_at(d, 42, * asio::buffer(data, size), ec); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
* * @note This overload is equivalent to calling: * @code asio::write_at( * d, offset, buffers, * asio::transfer_all(), ec); @endcode */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, asio::error_code& ec); /// Write a certain amount of data at a specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * device. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's write_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. 
* * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::write_at(d, 42, asio::buffer(data, size), * asio::transfer_at_least(32)); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition completion_condition); /// Write a certain amount of data at a specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. The sum * of the buffer sizes indicates the maximum number of bytes to write to the * device. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. 
A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's write_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition completion_condition, asio::error_code& ec); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Write all of the supplied data at the specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b The basic_streambuf object from which data will be written. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. * * @note This overload is equivalent to calling: * @code asio::write_at( * d, 42, b, * asio::transfer_all()); @endcode */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b); /// Write all of the supplied data at the specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. 
* * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b The basic_streambuf object from which data will be written. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes transferred. * * @note This overload is equivalent to calling: * @code asio::write_at( * d, 42, b, * asio::transfer_all(), ec); @endcode */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b, asio::error_code& ec); /// Write a certain amount of data at a specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. 
A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's write_some_at function. * * @returns The number of bytes transferred. * * @throws asio::system_error Thrown on failure. */ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition); /// Write a certain amount of data at a specified offset before returning. /** * This function is used to write a certain number of bytes of data to a random * access device at a specified offset. The call will block until one of the * following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * write_some_at function. * * @param d The device to which the data is to be written. The type must support * the SyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b The basic_streambuf object from which data will be written. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's write_some_at function. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. If an error occurs, returns the total * number of bytes successfully transferred prior to the error. 
*/ template std::size_t write_at(SyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, asio::error_code& ec); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ /** * @defgroup async_write_at asio::async_write_at * * @brief The @c async_write_at function is a composed asynchronous operation * that writes a certain amount of data at the specified offset before * completion. */ /*@{*/ /// Start an asynchronous operation to write all of the supplied data at the /// specified offset. /** * This function is used to asynchronously write a certain number of bytes of * data to a random access device at a specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_write_some_at function, and is known as a composed operation. * The program must ensure that the device performs no overlapping * write operations (such as async_write_at, the device's async_write_some_at * function, or any other composed operations that perform writes) until this * operation completes. Operations are overlapping if the regions defined by * their offsets, and the numbers of bytes to write, intersect. * * @param d The device to which the data is to be written. The type must support * the AsyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. 
* * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes written from the buffers. If an error * // occurred, this will be less than the sum of the buffer sizes. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * asio::async_write_at(d, 42, asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler); /// Start an asynchronous operation to write a certain amount of data at the /// specified offset. /** * This function is used to asynchronously write a certain number of bytes of * data to a random access device at a specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li All of the data in the supplied buffers has been written. That is, the * bytes transferred is equal to the sum of the buffer sizes. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * async_write_some_at function, and is known as a composed operation. 
* The program must ensure that the device performs no overlapping * write operations (such as async_write_at, the device's async_write_some_at * function, or any other composed operations that perform writes) until this * operation completes. Operations are overlapping if the regions defined by * their offsets, and the numbers of bytes to write, intersect. * * @param d The device to which the data is to be written. The type must support * the AsyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param buffers One or more buffers containing the data to be written. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's async_write_some_at function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes written from the buffers. If an error * // occurred, this will be less than the sum of the buffer sizes. 
* std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code asio::async_write_at(d, 42, * asio::buffer(data, size), * asio::transfer_at_least(32), * handler); @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, const ConstBufferSequence& buffers, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler); #if !defined(ASIO_NO_EXTENSIONS) #if !defined(ASIO_NO_IOSTREAM) /// Start an asynchronous operation to write all of the supplied data at the /// specified offset. /** * This function is used to asynchronously write a certain number of bytes of * data to a random access device at a specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li An error occurred. * * This operation is implemented in terms of zero or more calls to the device's * async_write_some_at function, and is known as a composed operation. * The program must ensure that the device performs no overlapping * write operations (such as async_write_at, the device's async_write_some_at * function, or any other composed operations that perform writes) until this * operation completes. Operations are overlapping if the regions defined by * their offsets, and the numbers of bytes to write, intersect. 
* * @param d The device to which the data is to be written. The type must support * the AsyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes written from the buffers. If an error * // occurred, this will be less than the sum of the buffer sizes. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b, ASIO_MOVE_ARG(WriteHandler) handler); /// Start an asynchronous operation to write a certain amount of data at the /// specified offset. /** * This function is used to asynchronously write a certain number of bytes of * data to a random access device at a specified offset. The function call * always returns immediately. The asynchronous operation will continue until * one of the following conditions is true: * * @li All of the data in the supplied basic_streambuf has been written. * * @li The completion_condition function object returns 0. * * This operation is implemented in terms of zero or more calls to the device's * async_write_some_at function, and is known as a composed operation. 
* The program must ensure that the device performs no overlapping * write operations (such as async_write_at, the device's async_write_some_at * function, or any other composed operations that perform writes) until this * operation completes. Operations are overlapping if the regions defined by * their offsets, and the numbers of bytes to write, intersect. * * @param d The device to which the data is to be written. The type must support * the AsyncRandomAccessWriteDevice concept. * * @param offset The offset at which the data will be written. * * @param b A basic_streambuf object from which data will be written. Ownership * of the streambuf is retained by the caller, which must guarantee that it * remains valid until the handler is called. * * @param completion_condition The function object to be called to determine * whether the write operation is complete. The signature of the function object * must be: * @code std::size_t completion_condition( * // Result of latest async_write_some_at operation. * const asio::error_code& error, * * // Number of bytes transferred so far. * std::size_t bytes_transferred * ); @endcode * A return value of 0 indicates that the write operation is complete. A * non-zero return value indicates the maximum number of bytes to be written on * the next call to the device's async_write_some_at function. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * // Result of operation. * const asio::error_code& error, * * // Number of bytes written from the buffers. If an error * // occurred, this will be less than the sum of the buffer sizes. * std::size_t bytes_transferred * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. 
On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_at(AsyncRandomAccessWriteDevice& d, uint64_t offset, basic_streambuf& b, CompletionCondition completion_condition, ASIO_MOVE_ARG(WriteHandler) handler); #endif // !defined(ASIO_NO_IOSTREAM) #endif // !defined(ASIO_NO_EXTENSIONS) /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/write_at.hpp" #endif // ASIO_WRITE_AT_HPP galera-4-26.4.25/asio/asio/basic_socket_iostream.hpp000644 000164 177776 00000031113 15107057155 023446 0ustar00jenkinsnogroup000000 000000 // // basic_socket_iostream.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SOCKET_IOSTREAM_HPP #define ASIO_BASIC_SOCKET_IOSTREAM_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if !defined(ASIO_NO_IOSTREAM) #include #include #include "asio/basic_socket_streambuf.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # include "asio/detail/variadic_templates.hpp" // A macro that should expand to: // template // explicit basic_socket_iostream(T1 x1, ..., Tn xn) // : std::basic_iostream( // &this->detail::socket_iostream_base< // Protocol, Clock, WaitTraits>::streambuf_) // { // if (rdbuf()->connect(x1, ..., xn) == 0) // this->setstate(std::ios_base::failbit); // } // This macro should only persist within this file. 
# define ASIO_PRIVATE_CTR_DEF(n) \ template \ explicit basic_socket_iostream(ASIO_VARIADIC_BYVAL_PARAMS(n)) \ : std::basic_iostream( \ &this->detail::socket_iostream_base< \ Protocol, Clock, WaitTraits>::streambuf_) \ { \ this->setf(std::ios_base::unitbuf); \ if (rdbuf()->connect(ASIO_VARIADIC_BYVAL_ARGS(n)) == 0) \ this->setstate(std::ios_base::failbit); \ } \ /**/ // A macro that should expand to: // template // void connect(T1 x1, ..., Tn xn) // { // if (rdbuf()->connect(x1, ..., xn) == 0) // this->setstate(std::ios_base::failbit); // } // This macro should only persist within this file. # define ASIO_PRIVATE_CONNECT_DEF(n) \ template \ void connect(ASIO_VARIADIC_BYVAL_PARAMS(n)) \ { \ if (rdbuf()->connect(ASIO_VARIADIC_BYVAL_ARGS(n)) == 0) \ this->setstate(std::ios_base::failbit); \ } \ /**/ #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #include "asio/detail/push_options.hpp" namespace asio { namespace detail { // A separate base class is used to ensure that the streambuf is initialised // prior to the basic_socket_iostream's basic_iostream base class. template class socket_iostream_base { protected: socket_iostream_base() { } #if defined(ASIO_HAS_MOVE) socket_iostream_base(socket_iostream_base&& other) : streambuf_(std::move(other.streambuf_)) { } socket_iostream_base(basic_stream_socket s) : streambuf_(std::move(s)) { } socket_iostream_base& operator=(socket_iostream_base&& other) { streambuf_ = std::move(other.streambuf_); return *this; } #endif // defined(ASIO_HAS_MOVE) basic_socket_streambuf streambuf_; }; } // namespace detail #if !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL) #define ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL // Forward declaration with defaulted arguments. 
template > #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typename Clock = chrono::steady_clock, typename WaitTraits = wait_traits > #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) class basic_socket_iostream; #endif // !defined(ASIO_BASIC_SOCKET_IOSTREAM_FWD_DECL) /// Iostream interface for a socket. #if defined(GENERATING_DOCUMENTATION) template > #else // defined(GENERATING_DOCUMENTATION) template #endif // defined(GENERATING_DOCUMENTATION) class basic_socket_iostream : private detail::socket_iostream_base, public std::basic_iostream { private: // These typedefs are intended keep this class's implementation independent // of whether it's using Boost.DateClock, Boost.Chrono or std::chrono. #if defined(ASIO_HAS_BOOST_DATE_TIME) \ && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typedef WaitTraits traits_helper; #else // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) typedef detail::chrono_time_traits traits_helper; #endif // defined(ASIO_HAS_BOOST_DATE_TIME) // && defined(ASIO_USE_BOOST_DATE_TIME_FOR_SOCKET_IOSTREAM) public: /// The protocol type. typedef Protocol protocol_type; /// The endpoint type. typedef typename Protocol::endpoint endpoint_type; /// The clock type. typedef Clock clock_type; #if defined(GENERATING_DOCUMENTATION) /// (Deprecated: Use time_point.) The time type. typedef typename WaitTraits::time_type time_type; /// The time type. typedef typename WaitTraits::time_point time_point; /// (Deprecated: Use duration.) The duration type. typedef typename WaitTraits::duration_type duration_type; /// The duration type. 
typedef typename WaitTraits::duration duration; #else # if !defined(ASIO_NO_DEPRECATED) typedef typename traits_helper::time_type time_type; typedef typename traits_helper::duration_type duration_type; # endif // !defined(ASIO_NO_DEPRECATED) typedef typename traits_helper::time_type time_point; typedef typename traits_helper::duration_type duration; #endif /// Construct a basic_socket_iostream without establishing a connection. basic_socket_iostream() : std::basic_iostream( &this->detail::socket_iostream_base< Protocol, Clock, WaitTraits>::streambuf_) { this->setf(std::ios_base::unitbuf); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Construct a basic_socket_iostream from the supplied socket. explicit basic_socket_iostream(basic_stream_socket s) : detail::socket_iostream_base< Protocol, Clock, WaitTraits>(std::move(s)), std::basic_iostream( &this->detail::socket_iostream_base< Protocol, Clock, WaitTraits>::streambuf_) { this->setf(std::ios_base::unitbuf); } #if defined(ASIO_HAS_STD_IOSTREAM_MOVE) \ || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_socket_iostream from another. basic_socket_iostream(basic_socket_iostream&& other) : detail::socket_iostream_base< Protocol, Clock, WaitTraits>(std::move(other)), std::basic_iostream(std::move(other)) { this->set_rdbuf(&this->detail::socket_iostream_base< Protocol, Clock, WaitTraits>::streambuf_); } /// Move-assign a basic_socket_iostream from another. basic_socket_iostream& operator=(basic_socket_iostream&& other) { std::basic_iostream::operator=(std::move(other)); detail::socket_iostream_base< Protocol, Clock, WaitTraits>::operator=(std::move(other)); return *this; } #endif // defined(ASIO_HAS_STD_IOSTREAM_MOVE) // || defined(GENERATING_DOCUMENTATION) #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) #if defined(GENERATING_DOCUMENTATION) /// Establish a connection to an endpoint corresponding to a resolver query. 
/** * This constructor automatically establishes a connection based on the * supplied resolver query parameters. The arguments are used to construct * a resolver query object. */ template explicit basic_socket_iostream(T1 t1, ..., TN tn); #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) template explicit basic_socket_iostream(T... x) : std::basic_iostream( &this->detail::socket_iostream_base< Protocol, Clock, WaitTraits>::streambuf_) { this->setf(std::ios_base::unitbuf); if (rdbuf()->connect(x...) == 0) this->setstate(std::ios_base::failbit); } #else ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CTR_DEF) #endif #if defined(GENERATING_DOCUMENTATION) /// Establish a connection to an endpoint corresponding to a resolver query. /** * This function automatically establishes a connection based on the supplied * resolver query parameters. The arguments are used to construct a resolver * query object. */ template void connect(T1 t1, ..., TN tn); #elif defined(ASIO_HAS_VARIADIC_TEMPLATES) template void connect(T... x) { if (rdbuf()->connect(x...) == 0) this->setstate(std::ios_base::failbit); } #else ASIO_VARIADIC_GENERATE(ASIO_PRIVATE_CONNECT_DEF) #endif /// Close the connection. void close() { if (rdbuf()->close() == 0) this->setstate(std::ios_base::failbit); } /// Return a pointer to the underlying streambuf. basic_socket_streambuf* rdbuf() const { return const_cast*>( &this->detail::socket_iostream_base< Protocol, Clock, WaitTraits>::streambuf_); } /// Get a reference to the underlying socket. basic_socket& socket() { return rdbuf()->socket(); } /// Get the last error associated with the stream. /** * @return An \c error_code corresponding to the last error from the stream. 
* * @par Example * To print the error associated with a failure to establish a connection: * @code tcp::iostream s("www.boost.org", "http"); * if (!s) * { * std::cout << "Error: " << s.error().message() << std::endl; * } @endcode */ const asio::error_code& error() const { return rdbuf()->error(); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use expiry().) Get the stream's expiry time as an absolute /// time. /** * @return An absolute time value representing the stream's expiry time. */ time_point expires_at() const { return rdbuf()->expires_at(); } #endif // !defined(ASIO_NO_DEPRECATED) /// Get the stream's expiry time as an absolute time. /** * @return An absolute time value representing the stream's expiry time. */ time_point expiry() const { return rdbuf()->expiry(); } /// Set the stream's expiry time as an absolute time. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the stream. */ void expires_at(const time_point& expiry_time) { rdbuf()->expires_at(expiry_time); } /// Set the stream's expiry time relative to now. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the timer. */ void expires_after(const duration& expiry_time) { rdbuf()->expires_after(expiry_time); } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use expiry().) Get the stream's expiry time relative to now. /** * @return A relative time value representing the stream's expiry time. */ duration expires_from_now() const { return rdbuf()->expires_from_now(); } /// (Deprecated: Use expires_after().) 
Set the stream's expiry time relative /// to now. /** * This function sets the expiry time associated with the stream. Stream * operations performed after this time (where the operations cannot be * completed using the internal buffers) will fail with the error * asio::error::operation_aborted. * * @param expiry_time The expiry time to be used for the timer. */ void expires_from_now(const duration& expiry_time) { rdbuf()->expires_from_now(expiry_time); } #endif // !defined(ASIO_NO_DEPRECATED) private: // Disallow copying and assignment. basic_socket_iostream(const basic_socket_iostream&) ASIO_DELETED; basic_socket_iostream& operator=( const basic_socket_iostream&) ASIO_DELETED; }; } // namespace asio #include "asio/detail/pop_options.hpp" #if !defined(ASIO_HAS_VARIADIC_TEMPLATES) # undef ASIO_PRIVATE_CTR_DEF # undef ASIO_PRIVATE_CONNECT_DEF #endif // !defined(ASIO_HAS_VARIADIC_TEMPLATES) #endif // !defined(ASIO_NO_IOSTREAM) #endif // ASIO_BASIC_SOCKET_IOSTREAM_HPP galera-4-26.4.25/asio/asio/uses_executor.hpp000644 000164 177776 00000004265 15107057155 022017 0ustar00jenkinsnogroup000000 000000 // // uses_executor.hpp // ~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_USES_EXECUTOR_HPP #define ASIO_USES_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A special type, similar to std::nothrow_t, used to disambiguate /// constructors that accept executor arguments. /** * The executor_arg_t struct is an empty structure type used as a unique type * to disambiguate constructor and function overloading. 
Specifically, some * types have constructors with executor_arg_t as the first argument, * immediately followed by an argument of a type that satisfies the Executor * type requirements. */ struct executor_arg_t { /// Constructor. ASIO_CONSTEXPR executor_arg_t() ASIO_NOEXCEPT { } }; /// A special value, similar to std::nothrow, used to disambiguate constructors /// that accept executor arguments. /** * See asio::executor_arg_t and asio::uses_executor * for more information. */ #if defined(ASIO_HAS_CONSTEXPR) || defined(GENERATING_DOCUMENTATION) constexpr executor_arg_t executor_arg; #elif defined(ASIO_MSVC) __declspec(selectany) executor_arg_t executor_arg; #endif /// The uses_executor trait detects whether a type T has an associated executor /// that is convertible from type Executor. /** * Meets the BinaryTypeTrait requirements. The Asio library provides a * definition that is derived from false_type. A program may specialize this * template to derive from true_type for a user-defined type T that can be * constructed with an executor, where the first argument of a constructor has * type executor_arg_t and the second argument is convertible from type * Executor. */ template struct uses_executor : false_type {}; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_USES_EXECUTOR_HPP galera-4-26.4.25/asio/asio/basic_waitable_timer.hpp000644 000164 177776 00000061546 15107057155 023260 0ustar00jenkinsnogroup000000 000000 // // basic_waitable_timer.hpp // ~~~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_WAITABLE_TIMER_HPP #define ASIO_BASIC_WAITABLE_TIMER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/chrono_time_traits.hpp" #include "asio/detail/deadline_timer_service.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/error.hpp" #include "asio/executor.hpp" #include "asio/wait_traits.hpp" #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { #if !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL) #define ASIO_BASIC_WAITABLE_TIMER_FWD_DECL // Forward declaration with defaulted arguments. template , typename Executor = executor> class basic_waitable_timer; #endif // !defined(ASIO_BASIC_WAITABLE_TIMER_FWD_DECL) /// Provides waitable timer functionality. /** * The basic_waitable_timer class template provides the ability to perform a * blocking or asynchronous wait for a timer to expire. * * A waitable timer is always in one of two states: "expired" or "not expired". * If the wait() or async_wait() function is called on an expired timer, the * wait operation will complete immediately. * * Most applications will use one of the asio::steady_timer, * asio::system_timer or asio::high_resolution_timer typedefs. * * @note This waitable timer functionality is for use with the C++11 standard * library's @c <chrono> facility, or with the Boost.Chrono library. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. * * @par Examples * Performing a blocking wait (C++11): * @code * // Construct a timer without setting an expiry time. 
* asio::steady_timer timer(my_context); * * // Set an expiry time relative to now. * timer.expires_after(std::chrono::seconds(5)); * * // Wait for the timer to expire. * timer.wait(); * @endcode * * @par * Performing an asynchronous wait (C++11): * @code * void handler(const asio::error_code& error) * { * if (!error) * { * // Timer expired. * } * } * * ... * * // Construct a timer with an absolute expiry time. * asio::steady_timer timer(my_context, * std::chrono::steady_clock::now() + std::chrono::seconds(60)); * * // Start an asynchronous wait. * timer.async_wait(handler); * @endcode * * @par Changing an active waitable timer's expiry time * * Changing the expiry time of a timer while there are pending asynchronous * waits causes those wait operations to be cancelled. To ensure that the action * associated with the timer is performed only once, use something like this: * used: * * @code * void on_some_event() * { * if (my_timer.expires_after(seconds(5)) > 0) * { * // We managed to cancel the timer. Start new asynchronous wait. * my_timer.async_wait(on_timeout); * } * else * { * // Too late, timer has already expired! * } * } * * void on_timeout(const asio::error_code& e) * { * if (e != asio::error::operation_aborted) * { * // Timer was not cancelled, take necessary action. * } * } * @endcode * * @li The asio::basic_waitable_timer::expires_after() function * cancels any pending asynchronous waits, and returns the number of * asynchronous waits that were cancelled. If it returns 0 then you were too * late and the wait handler has already been executed, or will soon be * executed. If it returns 1 then the wait handler was successfully cancelled. * * @li If a wait handler is cancelled, the asio::error_code passed to * it contains the value asio::error::operation_aborted. */ template class basic_waitable_timer { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The clock type. 
typedef Clock clock_type; /// The duration type of the clock. typedef typename clock_type::duration duration; /// The time point type of the clock. typedef typename clock_type::time_point time_point; /// The wait traits type. typedef WaitTraits traits_type; /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_after() functions must be called to set an expiry * time before the timer can be waited on. * * @param ex The I/O executor that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. */ explicit basic_waitable_timer(const executor_type& ex) : impl_(ex) { } /// Constructor. /** * This constructor creates a timer without setting an expiry time. The * expires_at() or expires_after() functions must be called to set an expiry * time before the timer can be waited on. * * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. */ template explicit basic_waitable_timer(ExecutionContext& context, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { } /// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. * * @param ex The I/O executor object that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. */ basic_waitable_timer(const executor_type& ex, const time_point& expiry_time) : impl_(ex) { asio::error_code ec; impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); } /// Constructor to set a particular expiry time as an absolute time. /** * This constructor creates a timer and sets the expiry time. 
* * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, expressed * as an absolute time. */ template explicit basic_waitable_timer(ExecutionContext& context, const time_point& expiry_time, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().expires_at(impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); } /// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. * * @param ex The I/O executor that the timer will use, by default, to * dispatch handlers for any asynchronous operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. */ basic_waitable_timer(const executor_type& ex, const duration& expiry_time) : impl_(ex) { asio::error_code ec; impl_.get_service().expires_after( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_after"); } /// Constructor to set a particular expiry time relative to now. /** * This constructor creates a timer and sets the expiry time. * * @param context An execution context which provides the I/O executor that * the timer will use, by default, to dispatch handlers for any asynchronous * operations performed on the timer. * * @param expiry_time The expiry time to be used for the timer, relative to * now. 
*/ template explicit basic_waitable_timer(ExecutionContext& context, const duration& expiry_time, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().expires_after( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_after"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_waitable_timer from another. /** * This constructor moves a timer from one object to another. * * @param other The other basic_waitable_timer object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_waitable_timer(const executor_type&) * constructor. */ basic_waitable_timer(basic_waitable_timer&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_waitable_timer from another. /** * This assignment operator moves a timer from one object to another. Cancels * any outstanding asynchronous operations associated with the target object. * * @param other The other basic_waitable_timer object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_waitable_timer(const executor_type&) * constructor. */ basic_waitable_timer& operator=(basic_waitable_timer&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the timer. /** * This function destroys the timer, cancelling any outstanding asynchronous * wait operations associated with the timer as if by calling @c cancel. */ ~basic_waitable_timer() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Cancel any asynchronous operations that are waiting on the timer. 
/** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel() { asio::error_code ec; std::size_t s = impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); return s; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Cancel any asynchronous /// operations that are waiting on the timer. /** * This function forces the completion of any pending asynchronous wait * operations against the timer. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when cancel() is called, then the * handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t cancel(asio::error_code& ec) { return impl_.get_service().cancel(impl_.get_implementation(), ec); } #endif // !defined(ASIO_NO_DEPRECATED) /// Cancels one asynchronous operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one() { asio::error_code ec; std::size_t s = impl_.get_service().cancel_one( impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel_one"); return s; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Cancels one asynchronous /// operation that is waiting on the timer. /** * This function forces the completion of one pending asynchronous wait * operation against the timer. Handlers are cancelled in FIFO order. The * handler for the cancelled operation will be invoked with the * asio::error::operation_aborted error code. * * Cancelling the timer does not change the expiry time. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. That is, * either 0 or 1. 
* * @note If the timer has already expired when cancel_one() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t cancel_one(asio::error_code& ec) { return impl_.get_service().cancel_one(impl_.get_implementation(), ec); } /// (Deprecated: Use expiry().) Get the timer's expiry time as an absolute /// time. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ time_point expires_at() const { return impl_.get_service().expires_at(impl_.get_implementation()); } #endif // !defined(ASIO_NO_DEPRECATED) /// Get the timer's expiry time as an absolute time. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ time_point expiry() const { return impl_.get_service().expiry(impl_.get_implementation()); } /// Set the timer's expiry time as an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t expires_at(const time_point& expiry_time) { asio::error_code ec; std::size_t s = impl_.get_service().expires_at( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_at"); return s; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use non-error_code overload.) Set the timer's expiry time as /// an absolute time. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_at() is called, then * the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_at(const time_point& expiry_time, asio::error_code& ec) { return impl_.get_service().expires_at( impl_.get_implementation(), expiry_time, ec); } #endif // !defined(ASIO_NO_DEPRECATED) /// Set the timer's expiry time relative to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. 
* * @note If the timer has already expired when expires_after() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_after(const duration& expiry_time) { asio::error_code ec; std::size_t s = impl_.get_service().expires_after( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_after"); return s; } #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use expiry().) Get the timer's expiry time relative to now. /** * This function may be used to obtain the timer's current expiry time. * Whether the timer has expired or not does not affect this value. */ duration expires_from_now() const { return impl_.get_service().expires_from_now(impl_.get_implementation()); } /// (Deprecated: Use expires_after().) Set the timer's expiry time relative /// to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @return The number of asynchronous operations that were cancelled. * * @throws asio::system_error Thrown on failure. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. 
*/ std::size_t expires_from_now(const duration& expiry_time) { asio::error_code ec; std::size_t s = impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); asio::detail::throw_error(ec, "expires_from_now"); return s; } /// (Deprecated: Use expires_after().) Set the timer's expiry time relative /// to now. /** * This function sets the expiry time. Any pending asynchronous wait * operations will be cancelled. The handler for each cancelled operation will * be invoked with the asio::error::operation_aborted error code. * * @param expiry_time The expiry time to be used for the timer. * * @param ec Set to indicate what error occurred, if any. * * @return The number of asynchronous operations that were cancelled. * * @note If the timer has already expired when expires_from_now() is called, * then the handlers for asynchronous wait operations will: * * @li have already been invoked; or * * @li have been queued for invocation in the near future. * * These handlers can no longer be cancelled, and therefore are passed an * error code that indicates the successful completion of the wait operation. */ std::size_t expires_from_now(const duration& expiry_time, asio::error_code& ec) { return impl_.get_service().expires_from_now( impl_.get_implementation(), expiry_time, ec); } #endif // !defined(ASIO_NO_DEPRECATED) /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @throws asio::system_error Thrown on failure. */ void wait() { asio::error_code ec; impl_.get_service().wait(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "wait"); } /// Perform a blocking wait on the timer. /** * This function is used to wait for the timer to expire. This function * blocks and does not return until the timer has expired. * * @param ec Set to indicate what error occurred, if any. 
*/ void wait(asio::error_code& ec) { impl_.get_service().wait(impl_.get_implementation(), ec); } /// Start an asynchronous wait on the timer. /** * This function may be used to initiate an asynchronous wait against the * timer. It always returns immediately. * * For each call to async_wait(), the supplied handler will be called exactly * once. The handler will be called when: * * @li The timer has expired. * * @li The timer was cancelled, in which case the handler is passed the error * code asio::error::operation_aborted. * * @param handler The handler to be called when the timer expires. Copies * will be made of the handler as required. The function signature of the * handler must be: * @code void handler( * const asio::error_code& error // Result of operation. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). */ template ASIO_INITFN_RESULT_TYPE(WaitHandler, void (asio::error_code)) async_wait(ASIO_MOVE_ARG(WaitHandler) handler) { return async_initiate( initiate_async_wait(), handler, this); } private: // Disallow copying and assignment. basic_waitable_timer(const basic_waitable_timer&) ASIO_DELETED; basic_waitable_timer& operator=( const basic_waitable_timer&) ASIO_DELETED; struct initiate_async_wait { template void operator()(ASIO_MOVE_ARG(WaitHandler) handler, basic_waitable_timer* self) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WaitHandler. 
ASIO_WAIT_HANDLER_CHECK(WaitHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_wait( self->impl_.get_implementation(), handler2.value, self->impl_.get_implementation_executor()); } }; detail::io_object_impl< detail::deadline_timer_service< detail::chrono_time_traits >, executor_type > impl_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BASIC_WAITABLE_TIMER_HPP galera-4-26.4.25/asio/asio/executor.hpp000644 000164 177776 00000023146 15107057155 020757 0ustar00jenkinsnogroup000000 000000 // // executor.hpp // ~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_EXECUTOR_HPP #define ASIO_EXECUTOR_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/cstddef.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Exception thrown when trying to access an empty polymorphic executor. class bad_executor : public std::exception { public: /// Constructor. ASIO_DECL bad_executor() ASIO_NOEXCEPT; /// Obtain message associated with exception. ASIO_DECL virtual const char* what() const ASIO_NOEXCEPT_OR_NOTHROW; }; /// Polymorphic wrapper for executors. class executor { public: /// Default constructor. executor() ASIO_NOEXCEPT : impl_(0) { } /// Construct from nullptr. executor(nullptr_t) ASIO_NOEXCEPT : impl_(0) { } /// Copy constructor. executor(const executor& other) ASIO_NOEXCEPT : impl_(other.clone()) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move constructor. 
executor(executor&& other) ASIO_NOEXCEPT : impl_(other.impl_) { other.impl_ = 0; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Construct a polymorphic wrapper for the specified executor. template executor(Executor e); /// Allocator-aware constructor to create a polymorphic wrapper for the /// specified executor. template executor(allocator_arg_t, const Allocator& a, Executor e); /// Destructor. ~executor() { destroy(); } /// Assignment operator. executor& operator=(const executor& other) ASIO_NOEXCEPT { destroy(); impl_ = other.clone(); return *this; } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) // Move assignment operator. executor& operator=(executor&& other) ASIO_NOEXCEPT { destroy(); impl_ = other.impl_; other.impl_ = 0; return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Assignment operator for nullptr_t. executor& operator=(nullptr_t) ASIO_NOEXCEPT { destroy(); impl_ = 0; return *this; } /// Assignment operator to create a polymorphic wrapper for the specified /// executor. template executor& operator=(ASIO_MOVE_ARG(Executor) e) ASIO_NOEXCEPT { executor tmp(ASIO_MOVE_CAST(Executor)(e)); destroy(); impl_ = tmp.impl_; tmp.impl_ = 0; return *this; } /// Obtain the underlying execution context. execution_context& context() const ASIO_NOEXCEPT { return get_impl()->context(); } /// Inform the executor that it has some outstanding work to do. void on_work_started() const ASIO_NOEXCEPT { get_impl()->on_work_started(); } /// Inform the executor that some work is no longer outstanding. void on_work_finished() const ASIO_NOEXCEPT { get_impl()->on_work_finished(); } /// Request the executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object is executed according to the rules of the * target executor object. * * @param f The function object to be called. 
The executor will make a copy * of the handler object as required. The function signature of the function * object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object is executed according to the rules of the * target executor object. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the executor to invoke the given function object. /** * This function is used to ask the executor to execute the given function * object. The function object is executed according to the rules of the * target executor object. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; struct unspecified_bool_type_t {}; typedef void (*unspecified_bool_type)(unspecified_bool_type_t); static void unspecified_bool_true(unspecified_bool_type_t) {} /// Operator to test if the executor contains a valid target. operator unspecified_bool_type() const ASIO_NOEXCEPT { return impl_ ? 
&executor::unspecified_bool_true : 0; } /// Obtain type information for the target executor object. /** * @returns If @c *this has a target type of type @c T, typeid(T); * otherwise, typeid(void). */ #if !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION) const std::type_info& target_type() const ASIO_NOEXCEPT { return impl_ ? impl_->target_type() : typeid(void); } #else // !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION) const void* target_type() const ASIO_NOEXCEPT { return impl_ ? impl_->target_type() : 0; } #endif // !defined(ASIO_NO_TYPEID) || defined(GENERATING_DOCUMENTATION) /// Obtain a pointer to the target executor object. /** * @returns If target_type() == typeid(T), a pointer to the stored * executor target; otherwise, a null pointer. */ template Executor* target() ASIO_NOEXCEPT; /// Obtain a pointer to the target executor object. /** * @returns If target_type() == typeid(T), a pointer to the stored * executor target; otherwise, a null pointer. */ template const Executor* target() const ASIO_NOEXCEPT; /// Compare two executors for equality. friend bool operator==(const executor& a, const executor& b) ASIO_NOEXCEPT { if (a.impl_ == b.impl_) return true; if (!a.impl_ || !b.impl_) return false; return a.impl_->equals(b.impl_); } /// Compare two executors for inequality. friend bool operator!=(const executor& a, const executor& b) ASIO_NOEXCEPT { return !(a == b); } private: #if !defined(GENERATING_DOCUMENTATION) class function; template class impl; #if !defined(ASIO_NO_TYPEID) typedef const std::type_info& type_id_result_type; #else // !defined(ASIO_NO_TYPEID) typedef const void* type_id_result_type; #endif // !defined(ASIO_NO_TYPEID) template static type_id_result_type type_id() { #if !defined(ASIO_NO_TYPEID) return typeid(T); #else // !defined(ASIO_NO_TYPEID) static int unique_id; return &unique_id; #endif // !defined(ASIO_NO_TYPEID) } // Base class for all polymorphic executor implementations. 
class impl_base { public: virtual impl_base* clone() const ASIO_NOEXCEPT = 0; virtual void destroy() ASIO_NOEXCEPT = 0; virtual execution_context& context() ASIO_NOEXCEPT = 0; virtual void on_work_started() ASIO_NOEXCEPT = 0; virtual void on_work_finished() ASIO_NOEXCEPT = 0; virtual void dispatch(ASIO_MOVE_ARG(function)) = 0; virtual void post(ASIO_MOVE_ARG(function)) = 0; virtual void defer(ASIO_MOVE_ARG(function)) = 0; virtual type_id_result_type target_type() const ASIO_NOEXCEPT = 0; virtual void* target() ASIO_NOEXCEPT = 0; virtual const void* target() const ASIO_NOEXCEPT = 0; virtual bool equals(const impl_base* e) const ASIO_NOEXCEPT = 0; protected: impl_base(bool fast_dispatch) : fast_dispatch_(fast_dispatch) {} virtual ~impl_base() {} private: friend class executor; const bool fast_dispatch_; }; // Helper function to check and return the implementation pointer. impl_base* get_impl() const { if (!impl_) { bad_executor ex; asio::detail::throw_exception(ex); } return impl_; } // Helper function to clone another implementation. impl_base* clone() const ASIO_NOEXCEPT { return impl_ ? impl_->clone() : 0; } // Helper function to destroy an implementation. void destroy() ASIO_NOEXCEPT { if (impl_) impl_->destroy(); } impl_base* impl_; #endif // !defined(GENERATING_DOCUMENTATION) }; } // namespace asio ASIO_USES_ALLOCATOR(asio::executor) #include "asio/detail/pop_options.hpp" #include "asio/impl/executor.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/executor.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_EXECUTOR_HPP galera-4-26.4.25/asio/asio/handler_alloc_hook.hpp000644 000164 177776 00000004553 15107057155 022731 0ustar00jenkinsnogroup000000 000000 // // handler_alloc_hook.hpp // ~~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HANDLER_ALLOC_HOOK_HPP #define ASIO_HANDLER_ALLOC_HOOK_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/detail/push_options.hpp" namespace asio { /// Default allocation function for handlers. /** * Asynchronous operations may need to allocate temporary objects. Since * asynchronous operations have a handler function object, these temporary * objects can be said to be associated with the handler. * * Implement asio_handler_allocate and asio_handler_deallocate for your own * handlers to provide custom allocation for these temporary objects. * * The default implementation of these allocation hooks uses ::operator * new and ::operator delete. * * @note All temporary objects associated with a handler will be deallocated * before the upcall to the handler is performed. This allows the same memory to * be reused for a subsequent asynchronous operation initiated by the handler. * * @par Example * @code * class my_handler; * * void* asio_handler_allocate(std::size_t size, my_handler* context) * { * return ::operator new(size); * } * * void asio_handler_deallocate(void* pointer, std::size_t size, * my_handler* context) * { * ::operator delete(pointer); * } * @endcode */ ASIO_DECL void* asio_handler_allocate( std::size_t size, ...); /// Default deallocation function for handlers. /** * Implement asio_handler_allocate and asio_handler_deallocate for your own * handlers to provide custom allocation for the associated temporary objects. * * The default implementation of these allocation hooks uses ::operator * new and ::operator delete. * * @sa asio_handler_allocate. 
*/ ASIO_DECL void asio_handler_deallocate( void* pointer, std::size_t size, ...); } // namespace asio #include "asio/detail/pop_options.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/handler_alloc_hook.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_HANDLER_ALLOC_HOOK_HPP galera-4-26.4.25/asio/asio/basic_serial_port.hpp000644 000164 177776 00000071752 15107057155 022613 0ustar00jenkinsnogroup000000 000000 // // basic_serial_port.hpp // ~~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BASIC_SERIAL_PORT_HPP #define ASIO_BASIC_SERIAL_PORT_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_SERIAL_PORT) \ || defined(GENERATING_DOCUMENTATION) #include #include "asio/async_result.hpp" #include "asio/detail/handler_type_requirements.hpp" #include "asio/detail/io_object_impl.hpp" #include "asio/detail/non_const_lvalue.hpp" #include "asio/detail/throw_error.hpp" #include "asio/detail/type_traits.hpp" #include "asio/error.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/serial_port_base.hpp" #if defined(ASIO_HAS_IOCP) # include "asio/detail/win_iocp_serial_port_service.hpp" #else # include "asio/detail/reactive_serial_port_service.hpp" #endif #if defined(ASIO_HAS_MOVE) # include #endif // defined(ASIO_HAS_MOVE) #include "asio/detail/push_options.hpp" namespace asio { /// Provides serial port functionality. /** * The basic_serial_port class provides a wrapper over serial port * functionality. * * @par Thread Safety * @e Distinct @e objects: Safe.@n * @e Shared @e objects: Unsafe. 
*/ template class basic_serial_port : public serial_port_base { public: /// The type of the executor associated with the object. typedef Executor executor_type; /// The native representation of a serial port. #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined native_handle_type; #elif defined(ASIO_HAS_IOCP) typedef detail::win_iocp_serial_port_service::native_handle_type native_handle_type; #else typedef detail::reactive_serial_port_service::native_handle_type native_handle_type; #endif /// A basic_basic_serial_port is always the lowest layer. typedef basic_serial_port lowest_layer_type; /// Construct a basic_serial_port without opening it. /** * This constructor creates a serial port without opening it. * * @param ex The I/O executor that the serial port will use, by default, to * dispatch handlers for any asynchronous operations performed on the * serial port. */ explicit basic_serial_port(const executor_type& ex) : impl_(ex) { } /// Construct a basic_serial_port without opening it. /** * This constructor creates a serial port without opening it. * * @param context An execution context which provides the I/O executor that * the serial port will use, by default, to dispatch handlers for any * asynchronous operations performed on the serial port. */ template explicit basic_serial_port(ExecutionContext& context, typename enable_if< is_convertible::value, basic_serial_port >::type* = 0) : impl_(context) { } /// Construct and open a basic_serial_port. /** * This constructor creates and opens a serial port for the specified device * name. * * @param ex The I/O executor that the serial port will use, by default, to * dispatch handlers for any asynchronous operations performed on the * serial port. * * @param device The platform-specific device name for this serial * port. 
*/ basic_serial_port(const executor_type& ex, const char* device) : impl_(ex) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), device, ec); asio::detail::throw_error(ec, "open"); } /// Construct and open a basic_serial_port. /** * This constructor creates and opens a serial port for the specified device * name. * * @param context An execution context which provides the I/O executor that * the serial port will use, by default, to dispatch handlers for any * asynchronous operations performed on the serial port. * * @param device The platform-specific device name for this serial * port. */ template basic_serial_port(ExecutionContext& context, const char* device, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), device, ec); asio::detail::throw_error(ec, "open"); } /// Construct and open a basic_serial_port. /** * This constructor creates and opens a serial port for the specified device * name. * * @param ex The I/O executor that the serial port will use, by default, to * dispatch handlers for any asynchronous operations performed on the * serial port. * * @param device The platform-specific device name for this serial * port. */ basic_serial_port(const executor_type& ex, const std::string& device) : impl_(ex) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), device, ec); asio::detail::throw_error(ec, "open"); } /// Construct and open a basic_serial_port. /** * This constructor creates and opens a serial port for the specified device * name. * * @param context An execution context which provides the I/O executor that * the serial port will use, by default, to dispatch handlers for any * asynchronous operations performed on the serial port. * * @param device The platform-specific device name for this serial * port. 
*/ template basic_serial_port(ExecutionContext& context, const std::string& device, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), device, ec); asio::detail::throw_error(ec, "open"); } /// Construct a basic_serial_port on an existing native serial port. /** * This constructor creates a serial port object to hold an existing native * serial port. * * @param ex The I/O executor that the serial port will use, by default, to * dispatch handlers for any asynchronous operations performed on the * serial port. * * @param native_serial_port A native serial port. * * @throws asio::system_error Thrown on failure. */ basic_serial_port(const executor_type& ex, const native_handle_type& native_serial_port) : impl_(ex) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_serial_port, ec); asio::detail::throw_error(ec, "assign"); } /// Construct a basic_serial_port on an existing native serial port. /** * This constructor creates a serial port object to hold an existing native * serial port. * * @param context An execution context which provides the I/O executor that * the serial port will use, by default, to dispatch handlers for any * asynchronous operations performed on the serial port. * * @param native_serial_port A native serial port. * * @throws asio::system_error Thrown on failure. */ template basic_serial_port(ExecutionContext& context, const native_handle_type& native_serial_port, typename enable_if< is_convertible::value >::type* = 0) : impl_(context) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_serial_port, ec); asio::detail::throw_error(ec, "assign"); } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move-construct a basic_serial_port from another. /** * This constructor moves a serial port from one object to another. 
* * @param other The other basic_serial_port object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_serial_port(const executor_type&) * constructor. */ basic_serial_port(basic_serial_port&& other) : impl_(std::move(other.impl_)) { } /// Move-assign a basic_serial_port from another. /** * This assignment operator moves a serial port from one object to another. * * @param other The other basic_serial_port object from which the move will * occur. * * @note Following the move, the moved-from object is in the same state as if * constructed using the @c basic_serial_port(const executor_type&) * constructor. */ basic_serial_port& operator=(basic_serial_port&& other) { impl_ = std::move(other.impl_); return *this; } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Destroys the serial port. /** * This function destroys the serial port, cancelling any outstanding * asynchronous wait operations associated with the serial port as if by * calling @c cancel. */ ~basic_serial_port() { } /// Get the executor associated with the object. executor_type get_executor() ASIO_NOEXCEPT { return impl_.get_executor(); } /// Get a reference to the lowest layer. /** * This function returns a reference to the lowest layer in a stack of * layers. Since a basic_serial_port cannot contain any further layers, it * simply returns a reference to itself. * * @return A reference to the lowest layer in the stack of layers. Ownership * is not transferred to the caller. */ lowest_layer_type& lowest_layer() { return *this; } /// Get a const reference to the lowest layer. /** * This function returns a const reference to the lowest layer in a stack of * layers. Since a basic_serial_port cannot contain any further layers, it * simply returns a reference to itself. * * @return A const reference to the lowest layer in the stack of layers. * Ownership is not transferred to the caller. 
*/ const lowest_layer_type& lowest_layer() const { return *this; } /// Open the serial port using the specified device name. /** * This function opens the serial port for the specified device name. * * @param device The platform-specific device name. * * @throws asio::system_error Thrown on failure. */ void open(const std::string& device) { asio::error_code ec; impl_.get_service().open(impl_.get_implementation(), device, ec); asio::detail::throw_error(ec, "open"); } /// Open the serial port using the specified device name. /** * This function opens the serial port using the given platform-specific * device name. * * @param device The platform-specific device name. * * @param ec Set the indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID open(const std::string& device, asio::error_code& ec) { impl_.get_service().open(impl_.get_implementation(), device, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Assign an existing native serial port to the serial port. /* * This function opens the serial port to hold an existing native serial port. * * @param native_serial_port A native serial port. * * @throws asio::system_error Thrown on failure. */ void assign(const native_handle_type& native_serial_port) { asio::error_code ec; impl_.get_service().assign(impl_.get_implementation(), native_serial_port, ec); asio::detail::throw_error(ec, "assign"); } /// Assign an existing native serial port to the serial port. /* * This function opens the serial port to hold an existing native serial port. * * @param native_serial_port A native serial port. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID assign(const native_handle_type& native_serial_port, asio::error_code& ec) { impl_.get_service().assign(impl_.get_implementation(), native_serial_port, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Determine whether the serial port is open. bool is_open() const { return impl_.get_service().is_open(impl_.get_implementation()); } /// Close the serial port. 
/** * This function is used to close the serial port. Any asynchronous read or * write operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void close() { asio::error_code ec; impl_.get_service().close(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "close"); } /// Close the serial port. /** * This function is used to close the serial port. Any asynchronous read or * write operations will be cancelled immediately, and will complete with the * asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID close(asio::error_code& ec) { impl_.get_service().close(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get the native serial port representation. /** * This function may be used to obtain the underlying representation of the * serial port. This is intended to allow access to native serial port * functionality that is not otherwise provided. */ native_handle_type native_handle() { return impl_.get_service().native_handle(impl_.get_implementation()); } /// Cancel all asynchronous operations associated with the serial port. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @throws asio::system_error Thrown on failure. */ void cancel() { asio::error_code ec; impl_.get_service().cancel(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "cancel"); } /// Cancel all asynchronous operations associated with the serial port. /** * This function causes all outstanding asynchronous read or write operations * to finish immediately, and the handlers for cancelled operations will be * passed the asio::error::operation_aborted error. * * @param ec Set to indicate what error occurred, if any. 
*/ ASIO_SYNC_OP_VOID cancel(asio::error_code& ec) { impl_.get_service().cancel(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Send a break sequence to the serial port. /** * This function causes a break sequence of platform-specific duration to be * sent out the serial port. * * @throws asio::system_error Thrown on failure. */ void send_break() { asio::error_code ec; impl_.get_service().send_break(impl_.get_implementation(), ec); asio::detail::throw_error(ec, "send_break"); } /// Send a break sequence to the serial port. /** * This function causes a break sequence of platform-specific duration to be * sent out the serial port. * * @param ec Set to indicate what error occurred, if any. */ ASIO_SYNC_OP_VOID send_break(asio::error_code& ec) { impl_.get_service().send_break(impl_.get_implementation(), ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Set an option on the serial port. /** * This function is used to set an option on the serial port. * * @param option The option value to be set on the serial port. * * @throws asio::system_error Thrown on failure. * * @sa SettableSerialPortOption @n * asio::serial_port_base::baud_rate @n * asio::serial_port_base::flow_control @n * asio::serial_port_base::parity @n * asio::serial_port_base::stop_bits @n * asio::serial_port_base::character_size */ template void set_option(const SettableSerialPortOption& option) { asio::error_code ec; impl_.get_service().set_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "set_option"); } /// Set an option on the serial port. /** * This function is used to set an option on the serial port. * * @param option The option value to be set on the serial port. * * @param ec Set to indicate what error occurred, if any. 
* * @sa SettableSerialPortOption @n * asio::serial_port_base::baud_rate @n * asio::serial_port_base::flow_control @n * asio::serial_port_base::parity @n * asio::serial_port_base::stop_bits @n * asio::serial_port_base::character_size */ template ASIO_SYNC_OP_VOID set_option(const SettableSerialPortOption& option, asio::error_code& ec) { impl_.get_service().set_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Get an option from the serial port. /** * This function is used to get the current value of an option on the serial * port. * * @param option The option value to be obtained from the serial port. * * @throws asio::system_error Thrown on failure. * * @sa GettableSerialPortOption @n * asio::serial_port_base::baud_rate @n * asio::serial_port_base::flow_control @n * asio::serial_port_base::parity @n * asio::serial_port_base::stop_bits @n * asio::serial_port_base::character_size */ template void get_option(GettableSerialPortOption& option) const { asio::error_code ec; impl_.get_service().get_option(impl_.get_implementation(), option, ec); asio::detail::throw_error(ec, "get_option"); } /// Get an option from the serial port. /** * This function is used to get the current value of an option on the serial * port. * * @param option The option value to be obtained from the serial port. * * @param ec Set to indicate what error occurred, if any. * * @sa GettableSerialPortOption @n * asio::serial_port_base::baud_rate @n * asio::serial_port_base::flow_control @n * asio::serial_port_base::parity @n * asio::serial_port_base::stop_bits @n * asio::serial_port_base::character_size */ template ASIO_SYNC_OP_VOID get_option(GettableSerialPortOption& option, asio::error_code& ec) const { impl_.get_service().get_option(impl_.get_implementation(), option, ec); ASIO_SYNC_OP_VOID_RETURN(ec); } /// Write some data to the serial port. /** * This function is used to write data to the serial port. 
The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the serial port. * * @returns The number of bytes written. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * basic_serial_port.write_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t write_some(const ConstBufferSequence& buffers) { asio::error_code ec; std::size_t s = impl_.get_service().write_some( impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "write_some"); return s; } /// Write some data to the serial port. /** * This function is used to write data to the serial port. The function call * will block until one or more bytes of the data has been written * successfully, or until an error occurs. * * @param buffers One or more data buffers to be written to the serial port. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes written. Returns 0 if an error occurred. * * @note The write_some operation may not transmit all of the data to the * peer. Consider using the @ref write function if you need to ensure that * all data is written before the blocking operation completes. 
*/ template std::size_t write_some(const ConstBufferSequence& buffers, asio::error_code& ec) { return impl_.get_service().write_some( impl_.get_implementation(), buffers, ec); } /// Start an asynchronous write. /** * This function is used to asynchronously write data to the serial port. * The function call always returns immediately. * * @param buffers One or more data buffers to be written to the serial port. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the write operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes written. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The write operation may not transmit all of the data to the peer. * Consider using the @ref async_write function if you need to ensure that all * data is written before the asynchronous operation completes. * * @par Example * To write a single data buffer use the @ref buffer function as follows: * @code * basic_serial_port.async_write_some( * asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on writing multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. 
*/ template ASIO_INITFN_RESULT_TYPE(WriteHandler, void (asio::error_code, std::size_t)) async_write_some(const ConstBufferSequence& buffers, ASIO_MOVE_ARG(WriteHandler) handler) { return async_initiate( initiate_async_write_some(), handler, this, buffers); } /// Read some data from the serial port. /** * This function is used to read data from the serial port. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @returns The number of bytes read. * * @throws asio::system_error Thrown on failure. An error code of * asio::error::eof indicates that the connection was closed by the * peer. * * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. * * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * basic_serial_port.read_some(asio::buffer(data, size)); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template std::size_t read_some(const MutableBufferSequence& buffers) { asio::error_code ec; std::size_t s = impl_.get_service().read_some( impl_.get_implementation(), buffers, ec); asio::detail::throw_error(ec, "read_some"); return s; } /// Read some data from the serial port. /** * This function is used to read data from the serial port. The function * call will block until one or more bytes of data has been read successfully, * or until an error occurs. * * @param buffers One or more buffers into which the data will be read. * * @param ec Set to indicate what error occurred, if any. * * @returns The number of bytes read. Returns 0 if an error occurred. 
* * @note The read_some operation may not read all of the requested number of * bytes. Consider using the @ref read function if you need to ensure that * the requested amount of data is read before the blocking operation * completes. */ template std::size_t read_some(const MutableBufferSequence& buffers, asio::error_code& ec) { return impl_.get_service().read_some( impl_.get_implementation(), buffers, ec); } /// Start an asynchronous read. /** * This function is used to asynchronously read data from the serial port. * The function call always returns immediately. * * @param buffers One or more buffers into which the data will be read. * Although the buffers object may be copied as necessary, ownership of the * underlying memory blocks is retained by the caller, which must guarantee * that they remain valid until the handler is called. * * @param handler The handler to be called when the read operation completes. * Copies will be made of the handler as required. The function signature of * the handler must be: * @code void handler( * const asio::error_code& error, // Result of operation. * std::size_t bytes_transferred // Number of bytes read. * ); @endcode * Regardless of whether the asynchronous operation completes immediately or * not, the handler will not be invoked from within this function. On * immediate completion, invocation of the handler will be performed in a * manner equivalent to using asio::post(). * * @note The read operation may not read all of the requested number of bytes. * Consider using the @ref async_read function if you need to ensure that the * requested amount of data is read before the asynchronous operation * completes. 
* * @par Example * To read into a single data buffer use the @ref buffer function as follows: * @code * basic_serial_port.async_read_some( * asio::buffer(data, size), handler); * @endcode * See the @ref buffer documentation for information on reading into multiple * buffers in one go, and how to use it with arrays, boost::array or * std::vector. */ template ASIO_INITFN_RESULT_TYPE(ReadHandler, void (asio::error_code, std::size_t)) async_read_some(const MutableBufferSequence& buffers, ASIO_MOVE_ARG(ReadHandler) handler) { return async_initiate( initiate_async_read_some(), handler, this, buffers); } private: // Disallow copying and assignment. basic_serial_port(const basic_serial_port&) ASIO_DELETED; basic_serial_port& operator=(const basic_serial_port&) ASIO_DELETED; struct initiate_async_write_some { template void operator()(ASIO_MOVE_ARG(WriteHandler) handler, basic_serial_port* self, const ConstBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a WriteHandler. ASIO_WRITE_HANDLER_CHECK(WriteHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_write_some( self->impl_.get_implementation(), buffers, handler2.value, self->impl_.get_implementation_executor()); } }; struct initiate_async_read_some { template void operator()(ASIO_MOVE_ARG(ReadHandler) handler, basic_serial_port* self, const MutableBufferSequence& buffers) const { // If you get an error on the following line it means that your handler // does not meet the documented type requirements for a ReadHandler. 
ASIO_READ_HANDLER_CHECK(ReadHandler, handler) type_check; detail::non_const_lvalue handler2(handler); self->impl_.get_service().async_read_some( self->impl_.get_implementation(), buffers, handler2.value, self->impl_.get_implementation_executor()); } }; #if defined(ASIO_HAS_IOCP) detail::io_object_impl impl_; #else detail::io_object_impl impl_; #endif }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // defined(ASIO_HAS_SERIAL_PORT) // || defined(GENERATING_DOCUMENTATION) #endif // ASIO_BASIC_SERIAL_PORT_HPP galera-4-26.4.25/asio/asio/spawn.hpp000644 000164 177776 00000026060 15107057155 020247 0ustar00jenkinsnogroup000000 000000 // // spawn.hpp // ~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_SPAWN_HPP #define ASIO_SPAWN_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include "asio/bind_executor.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/type_traits.hpp" #include "asio/detail/wrapped_handler.hpp" #include "asio/executor.hpp" #include "asio/io_context.hpp" #include "asio/is_executor.hpp" #include "asio/strand.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Context object the represents the currently executing coroutine. /** * The basic_yield_context class is used to represent the currently executing * stackful coroutine. A basic_yield_context may be passed as a handler to an * asynchronous operation. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield); * ... * } @endcode * * The initiating function (async_read_some in the above example) suspends the * current coroutine. 
The coroutine is resumed when the asynchronous operation * completes, and the result of the operation is returned. */ template class basic_yield_context { public: /// The coroutine callee type, used by the implementation. /** * When using Boost.Coroutine v1, this type is: * @code typename coroutine @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code push_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined callee_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::push_coroutine callee_type; #else typedef boost::coroutines::coroutine callee_type; #endif /// The coroutine caller type, used by the implementation. /** * When using Boost.Coroutine v1, this type is: * @code typename coroutine::caller_type @endcode * When using Boost.Coroutine v2 (unidirectional coroutines), this type is: * @code pull_coroutine @endcode */ #if defined(GENERATING_DOCUMENTATION) typedef implementation_defined caller_type; #elif defined(BOOST_COROUTINES_UNIDIRECT) || defined(BOOST_COROUTINES_V2) typedef boost::coroutines::pull_coroutine caller_type; #else typedef boost::coroutines::coroutine::caller_type caller_type; #endif /// Construct a yield context to represent the specified coroutine. /** * Most applications do not need to use this constructor. Instead, the * spawn() function passes a yield context as an argument to the coroutine * function. */ basic_yield_context( const detail::weak_ptr& coro, caller_type& ca, Handler& handler) : coro_(coro), ca_(ca), handler_(handler), ec_(0) { } /// Construct a yield context from another yield context type. /** * Requires that OtherHandler be convertible to Handler. */ template basic_yield_context(const basic_yield_context& other) : coro_(other.coro_), ca_(other.ca_), handler_(other.handler_), ec_(other.ec_) { } /// Return a yield context that sets the specified error_code. 
/** * By default, when a yield context is used with an asynchronous operation, a * non-success error_code is converted to system_error and thrown. This * operator may be used to specify an error_code object that should instead be * set with the asynchronous operation's result. For example: * * @code template * void my_coroutine(basic_yield_context yield) * { * ... * std::size_t n = my_socket.async_read_some(buffer, yield[ec]); * if (ec) * { * // An error occurred. * } * ... * } @endcode */ basic_yield_context operator[](asio::error_code& ec) const { basic_yield_context tmp(*this); tmp.ec_ = &ec; return tmp; } #if defined(GENERATING_DOCUMENTATION) private: #endif // defined(GENERATING_DOCUMENTATION) detail::weak_ptr coro_; caller_type& ca_; Handler handler_; asio::error_code* ec_; }; #if defined(GENERATING_DOCUMENTATION) /// Context object that represents the currently executing coroutine. typedef basic_yield_context yield_context; #else // defined(GENERATING_DOCUMENTATION) typedef basic_yield_context< executor_binder > yield_context; #endif // defined(GENERATING_DOCUMENTATION) /** * @defgroup spawn asio::spawn * * @brief Start a new stackful coroutine. * * The spawn() function is a high-level wrapper over the Boost.Coroutine * library. This function enables programs to implement asynchronous logic in a * synchronous manner, as illustrated by the following example: * * @code asio::spawn(my_strand, do_echo); * * // ... * * void do_echo(asio::yield_context yield) * { * try * { * char data[128]; * for (;;) * { * std::size_t length = * my_socket.async_read_some( * asio::buffer(data), yield); * * asio::async_write(my_socket, * asio::buffer(data, length), yield); * } * } * catch (std::exception& e) * { * // ... * } * } @endcode */ /*@{*/ /// Start a new stackful coroutine, calling the specified handler when it /// completes. /** * This function is used to launch a new coroutine. * * @param function The coroutine function. 
The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine, calling the specified handler when it /// completes. /** * This function is used to launch a new coroutine. * * @param handler A handler to be called when the coroutine exits. More * importantly, the handler provides an execution context (via the the handler * invocation hook) for the coroutine. The handler must have the signature: * @code void handler(); @endcode * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(ASIO_MOVE_ARG(Handler) handler, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes(), typename enable_if::type>::value && !is_convertible::value>::type* = 0); /// Start a new stackful coroutine, inheriting the execution context of another. /** * This function is used to launch a new coroutine. * * @param ctx Identifies the current coroutine as a parent of the new * coroutine. This specifies that the new coroutine should inherit the * execution context of the parent. For example, if the parent coroutine is * executing in a particular strand, then the new coroutine will execute in the * same strand. * * @param function The coroutine function. The function must have the signature: * @code void function(basic_yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. 
*/ template void spawn(basic_yield_context ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes on a given executor. /** * This function is used to launch a new coroutine. * * @param ex Identifies the executor that will run the coroutine. The new * coroutine is implicitly given its own strand within this executor. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(const Executor& ex, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes(), typename enable_if::value>::type* = 0); /// Start a new stackful coroutine that executes on a given strand. /** * This function is used to launch a new coroutine. * * @param ex Identifies the strand that will run the coroutine. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(const strand& ex, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes in the context of a strand. /** * This function is used to launch a new coroutine. * * @param s Identifies a strand. By starting multiple coroutines on the same * strand, the implementation ensures that none of those coroutines can execute * simultaneously. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. 
*/ template void spawn(const asio::io_context::strand& s, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes()); /// Start a new stackful coroutine that executes on a given execution context. /** * This function is used to launch a new coroutine. * * @param ctx Identifies the execution context that will run the coroutine. The * new coroutine is implicitly given its own strand within this execution * context. * * @param function The coroutine function. The function must have the signature: * @code void function(yield_context yield); @endcode * * @param attributes Boost.Coroutine attributes used to customise the coroutine. */ template void spawn(ExecutionContext& ctx, ASIO_MOVE_ARG(Function) function, const boost::coroutines::attributes& attributes = boost::coroutines::attributes(), typename enable_if::value>::type* = 0); /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/spawn.hpp" #endif // ASIO_SPAWN_HPP galera-4-26.4.25/asio/asio/buffer.hpp000644 000164 177776 00000235103 15107057155 020370 0ustar00jenkinsnogroup000000 000000 // // buffer.hpp // ~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_BUFFER_HPP #define ASIO_BUFFER_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include #include #include #include #include #include #include "asio/detail/array_fwd.hpp" #include "asio/detail/memory.hpp" #include "asio/detail/string_view.hpp" #include "asio/detail/throw_exception.hpp" #include "asio/detail/type_traits.hpp" #if defined(ASIO_MSVC) && (ASIO_MSVC >= 1700) # if defined(_HAS_ITERATOR_DEBUGGING) && (_HAS_ITERATOR_DEBUGGING != 0) # if !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # define ASIO_ENABLE_BUFFER_DEBUGGING # endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # endif // defined(_HAS_ITERATOR_DEBUGGING) #endif // defined(ASIO_MSVC) && (ASIO_MSVC >= 1700) #if defined(__GNUC__) # if defined(_GLIBCXX_DEBUG) # if !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # define ASIO_ENABLE_BUFFER_DEBUGGING # endif // !defined(ASIO_DISABLE_BUFFER_DEBUGGING) # endif // defined(_GLIBCXX_DEBUG) #endif // defined(__GNUC__) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) # include "asio/detail/functional.hpp" #endif // ASIO_ENABLE_BUFFER_DEBUGGING #if defined(ASIO_HAS_BOOST_WORKAROUND) # include # if BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582)) \ || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590)) # define ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND # endif // BOOST_WORKAROUND(__BORLANDC__, BOOST_TESTED_AT(0x582)) // || BOOST_WORKAROUND(__SUNPRO_CC, BOOST_TESTED_AT(0x590)) #endif // defined(ASIO_HAS_BOOST_WORKAROUND) #if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) # include "asio/detail/type_traits.hpp" #endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) #include "asio/detail/push_options.hpp" namespace asio { class mutable_buffer; class const_buffer; /// Holds a buffer that can be modified. 
/** * The mutable_buffer class provides a safe representation of a buffer that can * be modified. It does not own the underlying data, and so is cheap to copy or * assign. * * @par Accessing Buffer Contents * * The contents of a buffer may be accessed using the @c data() and @c size() * member functions: * * @code asio::mutable_buffer b1 = ...; * std::size_t s1 = b1.size(); * unsigned char* p1 = static_cast(b1.data()); * @endcode * * The @c data() member function permits violations of type safety, so uses of * it in application code should be carefully considered. */ class mutable_buffer { public: /// Construct an empty buffer. mutable_buffer() ASIO_NOEXCEPT : data_(0), size_(0) { } /// Construct a buffer to represent a given memory range. mutable_buffer(void* data, std::size_t size) ASIO_NOEXCEPT : data_(data), size_(size) { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) mutable_buffer(void* data, std::size_t size, asio::detail::function debug_check) : data_(data), size_(size), debug_check_(debug_check) { } const asio::detail::function& get_debug_check() const { return debug_check_; } #endif // ASIO_ENABLE_BUFFER_DEBUGGING /// Get a pointer to the beginning of the memory range. void* data() const ASIO_NOEXCEPT { #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (size_ && debug_check_) debug_check_(); #endif // ASIO_ENABLE_BUFFER_DEBUGGING return data_; } /// Get the size of the memory range. std::size_t size() const ASIO_NOEXCEPT { return size_; } /// Move the start of the buffer by the specified number of bytes. mutable_buffer& operator+=(std::size_t n) ASIO_NOEXCEPT { std::size_t offset = n < size_ ? n : size_; data_ = static_cast(data_) + offset; size_ -= offset; return *this; } private: void* data_; std::size_t size_; #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) asio::detail::function debug_check_; #endif // ASIO_ENABLE_BUFFER_DEBUGGING }; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use mutable_buffer.) 
Adapts a single modifiable buffer so that /// it meets the requirements of the MutableBufferSequence concept. class mutable_buffers_1 : public mutable_buffer { public: /// The type for each element in the list of buffers. typedef mutable_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const mutable_buffer* const_iterator; /// Construct to represent a given memory range. mutable_buffers_1(void* data, std::size_t size) ASIO_NOEXCEPT : mutable_buffer(data, size) { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) mutable_buffers_1(void* data, std::size_t size, asio::detail::function debug_check) : mutable_buffer(data, size, debug_check) { } #endif // ASIO_ENABLE_BUFFER_DEBUGGING /// Construct to represent a single modifiable buffer. explicit mutable_buffers_1(const mutable_buffer& b) ASIO_NOEXCEPT : mutable_buffer(b) { } /// Get a random-access iterator to the first element. const_iterator begin() const ASIO_NOEXCEPT { return this; } /// Get a random-access iterator for one past the last element. const_iterator end() const ASIO_NOEXCEPT { return begin() + 1; } }; #endif // !defined(ASIO_NO_DEPRECATED) /// Holds a buffer that cannot be modified. /** * The const_buffer class provides a safe representation of a buffer that cannot * be modified. It does not own the underlying data, and so is cheap to copy or * assign. * * @par Accessing Buffer Contents * * The contents of a buffer may be accessed using the @c data() and @c size() * member functions: * * @code asio::const_buffer b1 = ...; * std::size_t s1 = b1.size(); * const unsigned char* p1 = static_cast(b1.data()); * @endcode * * The @c data() member function permits violations of type safety, so uses of * it in application code should be carefully considered. */ class const_buffer { public: /// Construct an empty buffer. const_buffer() ASIO_NOEXCEPT : data_(0), size_(0) { } /// Construct a buffer to represent a given memory range. 
const_buffer(const void* data, std::size_t size) ASIO_NOEXCEPT : data_(data), size_(size) { } /// Construct a non-modifiable buffer from a modifiable one. const_buffer(const mutable_buffer& b) ASIO_NOEXCEPT : data_(b.data()), size_(b.size()) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , debug_check_(b.get_debug_check()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) const_buffer(const void* data, std::size_t size, asio::detail::function debug_check) : data_(data), size_(size), debug_check_(debug_check) { } const asio::detail::function& get_debug_check() const { return debug_check_; } #endif // ASIO_ENABLE_BUFFER_DEBUGGING /// Get a pointer to the beginning of the memory range. const void* data() const ASIO_NOEXCEPT { #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) if (size_ && debug_check_) debug_check_(); #endif // ASIO_ENABLE_BUFFER_DEBUGGING return data_; } /// Get the size of the memory range. std::size_t size() const ASIO_NOEXCEPT { return size_; } /// Move the start of the buffer by the specified number of bytes. const_buffer& operator+=(std::size_t n) ASIO_NOEXCEPT { std::size_t offset = n < size_ ? n : size_; data_ = static_cast(data_) + offset; size_ -= offset; return *this; } private: const void* data_; std::size_t size_; #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) asio::detail::function debug_check_; #endif // ASIO_ENABLE_BUFFER_DEBUGGING }; #if !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use const_buffer.) Adapts a single non-modifiable buffer so /// that it meets the requirements of the ConstBufferSequence concept. class const_buffers_1 : public const_buffer { public: /// The type for each element in the list of buffers. typedef const_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const const_buffer* const_iterator; /// Construct to represent a given memory range. 
const_buffers_1(const void* data, std::size_t size) ASIO_NOEXCEPT : const_buffer(data, size) { } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) const_buffers_1(const void* data, std::size_t size, asio::detail::function debug_check) : const_buffer(data, size, debug_check) { } #endif // ASIO_ENABLE_BUFFER_DEBUGGING /// Construct to represent a single non-modifiable buffer. explicit const_buffers_1(const const_buffer& b) ASIO_NOEXCEPT : const_buffer(b) { } /// Get a random-access iterator to the first element. const_iterator begin() const ASIO_NOEXCEPT { return this; } /// Get a random-access iterator for one past the last element. const_iterator end() const ASIO_NOEXCEPT { return begin() + 1; } }; #endif // !defined(ASIO_NO_DEPRECATED) /// (Deprecated: Use the socket/descriptor wait() and async_wait() member /// functions.) An implementation of both the ConstBufferSequence and /// MutableBufferSequence concepts to represent a null buffer sequence. class null_buffers { public: /// The type for each element in the list of buffers. typedef mutable_buffer value_type; /// A random-access iterator type that may be used to read elements. typedef const mutable_buffer* const_iterator; /// Get a random-access iterator to the first element. const_iterator begin() const ASIO_NOEXCEPT { return &buf_; } /// Get a random-access iterator for one past the last element. const_iterator end() const ASIO_NOEXCEPT { return &buf_; } private: mutable_buffer buf_; }; /** @defgroup buffer_sequence_begin asio::buffer_sequence_begin * * @brief The asio::buffer_sequence_begin function returns an iterator * pointing to the first element in a buffer sequence. */ /*@{*/ /// Get an iterator to the first element in a buffer sequence. template inline const mutable_buffer* buffer_sequence_begin(const MutableBuffer& b, typename enable_if< is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return static_cast(detail::addressof(b)); } /// Get an iterator to the first element in a buffer sequence. 
template inline const const_buffer* buffer_sequence_begin(const ConstBuffer& b, typename enable_if< is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return static_cast(detail::addressof(b)); } #if defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) /// Get an iterator to the first element in a buffer sequence. template inline auto buffer_sequence_begin(C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT -> decltype(c.begin()) { return c.begin(); } /// Get an iterator to the first element in a buffer sequence. template inline auto buffer_sequence_begin(const C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT -> decltype(c.begin()) { return c.begin(); } #else // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) template inline typename C::iterator buffer_sequence_begin(C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return c.begin(); } template inline typename C::const_iterator buffer_sequence_begin(const C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return c.begin(); } #endif // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) /*@}*/ /** @defgroup buffer_sequence_end asio::buffer_sequence_end * * @brief The asio::buffer_sequence_end function returns an iterator * pointing to one past the end element in a buffer sequence. */ /*@{*/ /// Get an iterator to one past the end element in a buffer sequence. template inline const mutable_buffer* buffer_sequence_end(const MutableBuffer& b, typename enable_if< is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return static_cast(detail::addressof(b)) + 1; } /// Get an iterator to one past the end element in a buffer sequence. 
template inline const const_buffer* buffer_sequence_end(const ConstBuffer& b, typename enable_if< is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return static_cast(detail::addressof(b)) + 1; } #if defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) /// Get an iterator to one past the end element in a buffer sequence. template inline auto buffer_sequence_end(C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT -> decltype(c.end()) { return c.end(); } /// Get an iterator to one past the end element in a buffer sequence. template inline auto buffer_sequence_end(const C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT -> decltype(c.end()) { return c.end(); } #else // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) template inline typename C::iterator buffer_sequence_end(C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return c.end(); } template inline typename C::const_iterator buffer_sequence_end(const C& c, typename enable_if< !is_convertible::value && !is_convertible::value >::type* = 0) ASIO_NOEXCEPT { return c.end(); } #endif // defined(ASIO_HAS_DECLTYPE) || defined(GENERATING_DOCUMENTATION) /*@}*/ namespace detail { // Tag types used to select appropriately optimised overloads. struct one_buffer {}; struct multiple_buffers {}; // Helper trait to detect single buffers. 
template struct buffer_sequence_cardinality : conditional< is_same::value #if !defined(ASIO_NO_DEPRECATED) || is_same::value || is_same::value #endif // !defined(ASIO_NO_DEPRECATED) || is_same::value, one_buffer, multiple_buffers>::type {}; template inline std::size_t buffer_size(one_buffer, Iterator begin, Iterator) ASIO_NOEXCEPT { return const_buffer(*begin).size(); } template inline std::size_t buffer_size(multiple_buffers, Iterator begin, Iterator end) ASIO_NOEXCEPT { std::size_t total_buffer_size = 0; Iterator iter = begin; for (; iter != end; ++iter) { const_buffer b(*iter); total_buffer_size += b.size(); } return total_buffer_size; } } // namespace detail /// Get the total number of bytes in a buffer sequence. /** * The @c buffer_size function determines the total size of all buffers in the * buffer sequence, as if computed as follows: * * @code size_t total_size = 0; * auto i = asio::buffer_sequence_begin(buffers); * auto end = asio::buffer_sequence_end(buffers); * for (; i != end; ++i) * { * const_buffer b(*i); * total_size += b.size(); * } * return total_size; @endcode * * The @c BufferSequence template parameter may meet either of the @c * ConstBufferSequence or @c MutableBufferSequence type requirements. */ template inline std::size_t buffer_size(const BufferSequence& b) ASIO_NOEXCEPT { return detail::buffer_size( detail::buffer_sequence_cardinality(), asio::buffer_sequence_begin(b), asio::buffer_sequence_end(b)); } #if !defined(ASIO_NO_DEPRECATED) /** @defgroup buffer_cast asio::buffer_cast * * @brief (Deprecated: Use the @c data() member function.) The * asio::buffer_cast function is used to obtain a pointer to the * underlying memory region associated with a buffer. 
* * @par Examples: * * To access the memory of a non-modifiable buffer, use: * @code asio::const_buffer b1 = ...; * const unsigned char* p1 = asio::buffer_cast(b1); * @endcode * * To access the memory of a modifiable buffer, use: * @code asio::mutable_buffer b2 = ...; * unsigned char* p2 = asio::buffer_cast(b2); * @endcode * * The asio::buffer_cast function permits violations of type safety, so * uses of it in application code should be carefully considered. */ /*@{*/ /// Cast a non-modifiable buffer to a specified pointer to POD type. template inline PointerToPodType buffer_cast(const mutable_buffer& b) ASIO_NOEXCEPT { return static_cast(b.data()); } /// Cast a non-modifiable buffer to a specified pointer to POD type. template inline PointerToPodType buffer_cast(const const_buffer& b) ASIO_NOEXCEPT { return static_cast(b.data()); } /*@}*/ #endif // !defined(ASIO_NO_DEPRECATED) /// Create a new modifiable buffer that is offset from the start of another. /** * @relates mutable_buffer */ inline mutable_buffer operator+(const mutable_buffer& b, std::size_t n) ASIO_NOEXCEPT { std::size_t offset = n < b.size() ? n : b.size(); char* new_data = static_cast(b.data()) + offset; std::size_t new_size = b.size() - offset; return mutable_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that is offset from the start of another. /** * @relates mutable_buffer */ inline mutable_buffer operator+(std::size_t n, const mutable_buffer& b) ASIO_NOEXCEPT { return b + n; } /// Create a new non-modifiable buffer that is offset from the start of another. /** * @relates const_buffer */ inline const_buffer operator+(const const_buffer& b, std::size_t n) ASIO_NOEXCEPT { std::size_t offset = n < b.size() ? 
n : b.size(); const char* new_data = static_cast(b.data()) + offset; std::size_t new_size = b.size() - offset; return const_buffer(new_data, new_size #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that is offset from the start of another. /** * @relates const_buffer */ inline const_buffer operator+(std::size_t n, const const_buffer& b) ASIO_NOEXCEPT { return b + n; } #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) namespace detail { template class buffer_debug_check { public: buffer_debug_check(Iterator iter) : iter_(iter) { } ~buffer_debug_check() { #if defined(ASIO_MSVC) && (ASIO_MSVC == 1400) // MSVC 8's string iterator checking may crash in a std::string::iterator // object's destructor when the iterator points to an already-destroyed // std::string object, unless the iterator is cleared first. iter_ = Iterator(); #endif // defined(ASIO_MSVC) && (ASIO_MSVC == 1400) } void operator()() { (void)*iter_; } private: Iterator iter_; }; } // namespace detail #endif // ASIO_ENABLE_BUFFER_DEBUGGING /** @defgroup buffer asio::buffer * * @brief The asio::buffer function is used to create a buffer object to * represent raw memory, an array of POD elements, a vector of POD elements, * or a std::string. * * A buffer object represents a contiguous region of memory as a 2-tuple * consisting of a pointer and size in bytes. A tuple of the form {void*, * size_t} specifies a mutable (modifiable) region of memory. Similarly, a * tuple of the form {const void*, size_t} specifies a const * (non-modifiable) region of memory. These two forms correspond to the classes * mutable_buffer and const_buffer, respectively. To mirror C++'s conversion * rules, a mutable_buffer is implicitly convertible to a const_buffer, and the * opposite conversion is not permitted. 
 *
 * The simplest use case involves reading or writing a single buffer of a
 * specified size:
 *
 * @code sock.send(asio::buffer(data, size)); @endcode
 *
 * In the above example, the return value of asio::buffer meets the
 * requirements of the ConstBufferSequence concept so that it may be directly
 * passed to the socket's write function. A buffer created for modifiable
 * memory also meets the requirements of the MutableBufferSequence concept.
 *
 * An individual buffer may be created from a builtin array, std::vector,
 * std::array or boost::array of POD elements. This helps prevent buffer
 * overruns by automatically determining the size of the buffer:
 *
 * @code char d1[128];
 * size_t bytes_transferred = sock.receive(asio::buffer(d1));
 *
 * std::vector<char> d2(128);
 * bytes_transferred = sock.receive(asio::buffer(d2));
 *
 * std::array<char, 128> d3;
 * bytes_transferred = sock.receive(asio::buffer(d3));
 *
 * boost::array<char, 128> d4;
 * bytes_transferred = sock.receive(asio::buffer(d4)); @endcode
 *
 * In all four cases above, the buffers created are exactly 128 bytes long.
 * Note that a vector is @e never automatically resized when creating or using
 * a buffer. The buffer size is determined using the vector's size()
 * member function, and not its capacity.
 *
 * @par Accessing Buffer Contents
 *
 * The contents of a buffer may be accessed using the @c data() and @c size()
 * member functions:
 *
 * @code asio::mutable_buffer b1 = ...;
 * std::size_t s1 = b1.size();
 * unsigned char* p1 = static_cast<unsigned char*>(b1.data());
 *
 * asio::const_buffer b2 = ...;
 * std::size_t s2 = b2.size();
 * const void* p2 = b2.data(); @endcode
 *
 * The @c data() member function permits violations of type safety, so
 * uses of it in application code should be carefully considered.
 *
 * For convenience, a @ref buffer_size function is provided that works with
 * both buffers and buffer sequences (that is, types meeting the
 * ConstBufferSequence or MutableBufferSequence type requirements).
 In this
 * case, the function returns the total size of all buffers in the sequence.
 *
 * @par Buffer Copying
 *
 * The @ref buffer_copy function may be used to copy raw bytes between
 * individual buffers and buffer sequences.
 *
 * In particular, when used with the @ref buffer_size function, the @ref
 * buffer_copy function can be used to linearise a sequence of buffers. For
 * example:
 *
 * @code vector<const_buffer> buffers = ...;
 *
 * vector<unsigned char> data(asio::buffer_size(buffers));
 * asio::buffer_copy(asio::buffer(data), buffers); @endcode
 *
 * Note that @ref buffer_copy is implemented in terms of @c memcpy, and
 * consequently it cannot be used to copy between overlapping memory regions.
 *
 * @par Buffer Invalidation
 *
 * A buffer object does not have any ownership of the memory it refers to. It
 * is the responsibility of the application to ensure the memory region remains
 * valid until it is no longer required for an I/O operation. When the memory
 * is no longer available, the buffer is said to have been invalidated.
 *
 * For the asio::buffer overloads that accept an argument of type
 * std::vector, the buffer objects returned are invalidated by any vector
 * operation that also invalidates all references, pointers and iterators
 * referring to the elements in the sequence (C++ Std, 23.2.4)
 *
 * For the asio::buffer overloads that accept an argument of type
 * std::basic_string, the buffer objects returned are invalidated according to
 * the rules defined for invalidation of references, pointers and iterators
 * referring to elements of the sequence (C++ Std, 21.3).
 *
 * @par Buffer Arithmetic
 *
 * Buffer objects may be manipulated using simple arithmetic in a safe way
 * which helps prevent buffer overruns. Consider an array initialised as
 * follows:
 *
 * @code boost::array<char, 5> a = { 'a', 'b', 'c', 'd', 'e' }; @endcode
 *
 * A buffer object @c b1 created using:
 *
 * @code b1 = asio::buffer(a); @endcode
 *
 * represents the entire array, { 'a', 'b', 'c', 'd', 'e' }.
An * optional second argument to the asio::buffer function may be used to * limit the size, in bytes, of the buffer: * * @code b2 = asio::buffer(a, 3); @endcode * * such that @c b2 represents the data { 'a', 'b', 'c' }. Even if the * size argument exceeds the actual size of the array, the size of the buffer * object created will be limited to the array size. * * An offset may be applied to an existing buffer to create a new one: * * @code b3 = b1 + 2; @endcode * * where @c b3 will set to represent { 'c', 'd', 'e' }. If the offset * exceeds the size of the existing buffer, the newly created buffer will be * empty. * * Both an offset and size may be specified to create a buffer that corresponds * to a specific range of bytes within an existing buffer: * * @code b4 = asio::buffer(b1 + 1, 3); @endcode * * so that @c b4 will refer to the bytes { 'b', 'c', 'd' }. * * @par Buffers and Scatter-Gather I/O * * To read or write using multiple buffers (i.e. scatter-gather I/O), multiple * buffer objects may be assigned into a container that supports the * MutableBufferSequence (for read) or ConstBufferSequence (for write) concepts: * * @code * char d1[128]; * std::vector d2(128); * boost::array d3; * * boost::array bufs1 = { * asio::buffer(d1), * asio::buffer(d2), * asio::buffer(d3) }; * bytes_transferred = sock.receive(bufs1); * * std::vector bufs2; * bufs2.push_back(asio::buffer(d1)); * bufs2.push_back(asio::buffer(d2)); * bufs2.push_back(asio::buffer(d3)); * bytes_transferred = sock.send(bufs2); @endcode */ /*@{*/ #if defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) # define ASIO_MUTABLE_BUFFER mutable_buffer # define ASIO_CONST_BUFFER const_buffer #else // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) # define ASIO_MUTABLE_BUFFER mutable_buffers_1 # define ASIO_CONST_BUFFER const_buffers_1 #endif // defined(ASIO_NO_DEPRECATED) || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer from an existing buffer. 
/** * @returns mutable_buffer(b). */ inline ASIO_MUTABLE_BUFFER buffer( const mutable_buffer& b) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(b); } /// Create a new modifiable buffer from an existing buffer. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * b.data(), * min(b.size(), max_size_in_bytes)); @endcode */ inline ASIO_MUTABLE_BUFFER buffer(const mutable_buffer& b, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER( mutable_buffer(b.data(), b.size() < max_size_in_bytes ? b.size() : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING )); } /// Create a new non-modifiable buffer from an existing buffer. /** * @returns const_buffer(b). */ inline ASIO_CONST_BUFFER buffer( const const_buffer& b) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(b); } /// Create a new non-modifiable buffer from an existing buffer. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * b.data(), * min(b.size(), max_size_in_bytes)); @endcode */ inline ASIO_CONST_BUFFER buffer(const const_buffer& b, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(b.data(), b.size() < max_size_in_bytes ? b.size() : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , b.get_debug_check() #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that represents the given memory range. /** * @returns mutable_buffer(data, size_in_bytes). */ inline ASIO_MUTABLE_BUFFER buffer(void* data, std::size_t size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data, size_in_bytes); } /// Create a new non-modifiable buffer that represents the given memory range. /** * @returns const_buffer(data, size_in_bytes). */ inline ASIO_CONST_BUFFER buffer(const void* data, std::size_t size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data, size_in_bytes); } /// Create a new modifiable buffer that represents the given POD array. 
/** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * static_cast(data), * N * sizeof(PodType)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer(PodType (&data)[N]) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data, N * sizeof(PodType)); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * static_cast(data), * min(N * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer(PodType (&data)[N], std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data, N * sizeof(PodType) < max_size_in_bytes ? N * sizeof(PodType) : max_size_in_bytes); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * static_cast(data), * N * sizeof(PodType)); @endcode */ template inline ASIO_CONST_BUFFER buffer( const PodType (&data)[N]) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data, N * sizeof(PodType)); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * static_cast(data), * min(N * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer(const PodType (&data)[N], std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data, N * sizeof(PodType) < max_size_in_bytes ? N * sizeof(PodType) : max_size_in_bytes); } #if defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) // Borland C++ and Sun Studio think the overloads: // // unspecified buffer(boost::array& array ...); // // and // // unspecified buffer(boost::array& array ...); // // are ambiguous. This will be worked around by using a buffer_types traits // class that contains typedefs for the appropriate buffer and container // classes, based on whether PodType is const or non-const. 
namespace detail { template struct buffer_types_base; template <> struct buffer_types_base { typedef mutable_buffer buffer_type; typedef ASIO_MUTABLE_BUFFER container_type; }; template <> struct buffer_types_base { typedef const_buffer buffer_type; typedef ASIO_CONST_BUFFER container_type; }; template struct buffer_types : public buffer_types_base::value> { }; } // namespace detail template inline typename detail::buffer_types::container_type buffer(boost::array& data) ASIO_NOEXCEPT { typedef typename asio::detail::buffer_types::buffer_type buffer_type; typedef typename asio::detail::buffer_types::container_type container_type; return container_type( buffer_type(data.c_array(), data.size() * sizeof(PodType))); } template inline typename detail::buffer_types::container_type buffer(boost::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { typedef typename asio::detail::buffer_types::buffer_type buffer_type; typedef typename asio::detail::buffer_types::container_type container_type; return container_type( buffer_type(data.c_array(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes)); } #else // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer( boost::array& data) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER( data.c_array(), data.size() * sizeof(PodType)); } /// Create a new modifiable buffer that represents the given POD array. 
/** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer(boost::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.c_array(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_CONST_BUFFER buffer( boost::array& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType)); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer(boost::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } #endif // defined(ASIO_ENABLE_ARRAY_BUFFER_WORKAROUND) /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_CONST_BUFFER buffer( const boost::array& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType)); } /// Create a new non-modifiable buffer that represents the given POD array. 
/** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer(const boost::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } #if defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer( std::array& data) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.data(), data.size() * sizeof(PodType)); } /// Create a new modifiable buffer that represents the given POD array. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_MUTABLE_BUFFER buffer(std::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_CONST_BUFFER buffer( std::array& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType)); } /// Create a new non-modifiable buffer that represents the given POD array. 
/** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer(std::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * data.size() * sizeof(PodType)); @endcode */ template inline ASIO_CONST_BUFFER buffer( const std::array& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType)); } /// Create a new non-modifiable buffer that represents the given POD array. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer(const std::array& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes); } #endif // defined(ASIO_HAS_STD_ARRAY) || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer that represents the given POD vector. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.size() ? &data[0] : 0, * data.size() * sizeof(PodType)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline ASIO_MUTABLE_BUFFER buffer( std::vector& data) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER( data.size() ? 
&data[0] : 0, data.size() * sizeof(PodType) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that represents the given POD vector. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.size() ? &data[0] : 0, * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline ASIO_MUTABLE_BUFFER buffer(std::vector& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that represents the given POD vector. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.size() ? &data[0] : 0, * data.size() * sizeof(PodType)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. */ template inline ASIO_CONST_BUFFER buffer( const std::vector& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER( data.size() ? &data[0] : 0, data.size() * sizeof(PodType) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that represents the given POD vector. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.size() ? &data[0] : 0, * min(data.size() * sizeof(PodType), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any vector operation that would also * invalidate iterators. 
*/ template inline ASIO_CONST_BUFFER buffer( const std::vector& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(PodType) < max_size_in_bytes ? data.size() * sizeof(PodType) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::vector::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that represents the given string. /** * @returns mutable_buffer(data.size() ? &data[0] : 0, * data.size() * sizeof(Elem)). * * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline ASIO_MUTABLE_BUFFER buffer( std::basic_string& data) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(Elem) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new modifiable buffer that represents the given string. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.size() ? &data[0] : 0, * min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline ASIO_MUTABLE_BUFFER buffer( std::basic_string& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_MUTABLE_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(Elem) < max_size_in_bytes ? data.size() * sizeof(Elem) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that represents the given string. /** * @returns const_buffer(data.data(), data.size() * sizeof(Elem)). 
* * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline ASIO_CONST_BUFFER buffer( const std::basic_string& data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(Elem) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that represents the given string. /** * @returns A const_buffer value equivalent to: * @code const_buffer( * data.data(), * min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode * * @note The buffer is invalidated by any non-const operation called on the * given string object. */ template inline ASIO_CONST_BUFFER buffer( const std::basic_string& data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.data(), data.size() * sizeof(Elem) < max_size_in_bytes ? data.size() * sizeof(Elem) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename std::basic_string::const_iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } #if defined(ASIO_HAS_STRING_VIEW) \ || defined(GENERATING_DOCUMENTATION) /// Create a new modifiable buffer that represents the given string_view. /** * @returns mutable_buffer(data.size() ? &data[0] : 0, * data.size() * sizeof(Elem)). */ template inline ASIO_CONST_BUFFER buffer( basic_string_view data) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(Elem) #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename basic_string_view::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } /// Create a new non-modifiable buffer that represents the given string. /** * @returns A mutable_buffer value equivalent to: * @code mutable_buffer( * data.size() ? 
&data[0] : 0, * min(data.size() * sizeof(Elem), max_size_in_bytes)); @endcode */ template inline ASIO_CONST_BUFFER buffer( basic_string_view data, std::size_t max_size_in_bytes) ASIO_NOEXCEPT { return ASIO_CONST_BUFFER(data.size() ? &data[0] : 0, data.size() * sizeof(Elem) < max_size_in_bytes ? data.size() * sizeof(Elem) : max_size_in_bytes #if defined(ASIO_ENABLE_BUFFER_DEBUGGING) , detail::buffer_debug_check< typename basic_string_view::iterator >(data.begin()) #endif // ASIO_ENABLE_BUFFER_DEBUGGING ); } #endif // defined(ASIO_HAS_STRING_VIEW) // || defined(GENERATING_DOCUMENTATION) /*@}*/ /// Adapt a basic_string to the DynamicBuffer requirements. /** * Requires that sizeof(Elem) == 1. */ template class dynamic_string_buffer { public: /// The type used to represent a sequence of constant buffers that refers to /// the underlying memory. typedef ASIO_CONST_BUFFER const_buffers_type; /// The type used to represent a sequence of mutable buffers that refers to /// the underlying memory. typedef ASIO_MUTABLE_BUFFER mutable_buffers_type; /// Construct a dynamic buffer from a string. /** * @param s The string to be used as backing storage for the dynamic buffer. * The object stores a reference to the string and the user is responsible * for ensuring that the string object remains valid while the * dynamic_string_buffer object, and copies of the object, are in use. * * @b DynamicBuffer_v1: Any existing data in the string is treated as the * dynamic buffer's input sequence. * * @param maximum_size Specifies a maximum size for the buffer, in bytes. */ explicit dynamic_string_buffer(std::basic_string& s, std::size_t maximum_size = (std::numeric_limits::max)()) ASIO_NOEXCEPT : string_(s), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_((std::numeric_limits::max)()), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(maximum_size) { } /// @b DynamicBuffer_v2: Copy construct a dynamic buffer. 
dynamic_string_buffer(const dynamic_string_buffer& other) ASIO_NOEXCEPT : string_(other.string_), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_(other.size_), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(other.max_size_) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move construct a dynamic buffer. dynamic_string_buffer(dynamic_string_buffer&& other) ASIO_NOEXCEPT : string_(other.string_), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_(other.size_), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(other.max_size_) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// @b DynamicBuffer_v1: Get the size of the input sequence. /// @b DynamicBuffer_v2: Get the current size of the underlying memory. /** * @returns @b DynamicBuffer_v1 The current size of the input sequence. * @b DynamicBuffer_v2: The current size of the underlying string if less than * max_size(). Otherwise returns max_size(). */ std::size_t size() const ASIO_NOEXCEPT { #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) if (size_ != (std::numeric_limits::max)()) return size_; #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) return (std::min)(string_.size(), max_size()); } /// Get the maximum size of the dynamic buffer. /** * @returns The allowed maximum size of the underlying memory. */ std::size_t max_size() const ASIO_NOEXCEPT { return max_size_; } /// Get the maximum size that the buffer may grow to without triggering /// reallocation. /** * @returns The current capacity of the underlying string if less than * max_size(). Otherwise returns max_size(). */ std::size_t capacity() const ASIO_NOEXCEPT { return (std::min)(string_.capacity(), max_size()); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v1: Get a list of buffers that represents the input /// sequence. /** * @returns An object of type @c const_buffers_type that satisfies * ConstBufferSequence requirements, representing the basic_string memory in * the input sequence. 
* * @note The returned object is invalidated by any @c dynamic_string_buffer * or @c basic_string member function that resizes or erases the string. */ const_buffers_type data() const ASIO_NOEXCEPT { return const_buffers_type(asio::buffer(string_, size_)); } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the /// underlying memory. /** * @param pos Position of the first byte to represent in the buffer sequence * * @param n The number of bytes to return in the buffer sequence. If the * underlying memory is shorter, the buffer sequence represents as many bytes * as are available. * * @returns An object of type @c mutable_buffers_type that satisfies * MutableBufferSequence requirements, representing the basic_string memory. * * @note The returned object is invalidated by any @c dynamic_string_buffer * or @c basic_string member function that resizes or erases the string. */ mutable_buffers_type data(std::size_t pos, std::size_t n) ASIO_NOEXCEPT { return mutable_buffers_type(asio::buffer( asio::buffer(string_, max_size_) + pos, n)); } /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the /// underlying memory. /** * @param pos Position of the first byte to represent in the buffer sequence * * @param n The number of bytes to return in the buffer sequence. If the * underlying memory is shorter, the buffer sequence represents as many bytes * as are available. * * @note The returned object is invalidated by any @c dynamic_string_buffer * or @c basic_string member function that resizes or erases the string. */ const_buffers_type data(std::size_t pos, std::size_t n) const ASIO_NOEXCEPT { return const_buffers_type(asio::buffer( asio::buffer(string_, max_size_) + pos, n)); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v1: Get a list of buffers that represents the output /// sequence, with the given size. 
/** * Ensures that the output sequence can accommodate @c n bytes, resizing the * basic_string object as necessary. * * @returns An object of type @c mutable_buffers_type that satisfies * MutableBufferSequence requirements, representing basic_string memory * at the start of the output sequence of size @c n. * * @throws std::length_error If size() + n > max_size(). * * @note The returned object is invalidated by any @c dynamic_string_buffer * or @c basic_string member function that modifies the input sequence or * output sequence. */ mutable_buffers_type prepare(std::size_t n) { if (size() > max_size() || max_size() - size() < n) { std::length_error ex("dynamic_string_buffer too long"); asio::detail::throw_exception(ex); } if (size_ == (std::numeric_limits::max)()) size_ = string_.size(); // Enable v1 behaviour. string_.resize(size_ + n); return asio::buffer(asio::buffer(string_) + size_, n); } /// @b DynamicBuffer_v1: Move bytes from the output sequence to the input /// sequence. /** * @param n The number of bytes to append from the start of the output * sequence to the end of the input sequence. The remainder of the output * sequence is discarded. * * Requires a preceding call prepare(x) where x >= n, and * no intervening operations that modify the input or output sequence. * * @note If @c n is greater than the size of the output sequence, the entire * output sequence is moved to the input sequence and no error is issued. */ void commit(std::size_t n) { size_ += (std::min)(n, string_.size() - size_); string_.resize(size_); } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v2: Grow the underlying memory by the specified number of /// bytes. /** * Resizes the string to accommodate an additional @c n bytes at the end. * * @throws std::length_error If size() + n > max_size(). 
*/ void grow(std::size_t n) { if (size() > max_size() || max_size() - size() < n) { std::length_error ex("dynamic_string_buffer too long"); asio::detail::throw_exception(ex); } string_.resize(size() + n); } /// @b DynamicBuffer_v2: Shrink the underlying memory by the specified number /// of bytes. /** * Erases @c n bytes from the end of the string by resizing the basic_string * object. If @c n is greater than the current size of the string, the string * is emptied. */ void shrink(std::size_t n) { string_.resize(n > size() ? 0 : size() - n); } /// @b DynamicBuffer_v1: Remove characters from the input sequence. /// @b DynamicBuffer_v2: Consume the specified number of bytes from the /// beginning of the underlying memory. /** * @b DynamicBuffer_v1: Removes @c n characters from the beginning of the * input sequence. @note If @c n is greater than the size of the input * sequence, the entire input sequence is consumed and no error is issued. * * @b DynamicBuffer_v2: Erases @c n bytes from the beginning of the string. * If @c n is greater than the current size of the string, the string is * emptied. */ void consume(std::size_t n) { #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) if (size_ != (std::numeric_limits::max)()) { std::size_t consume_length = (std::min)(n, size_); string_.erase(0, consume_length); size_ -= consume_length; return; } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) string_.erase(0, n); } private: std::basic_string& string_; #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) std::size_t size_; #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) const std::size_t max_size_; }; /// Adapt a vector to the DynamicBuffer requirements. /** * Requires that sizeof(Elem) == 1. */ template class dynamic_vector_buffer { public: /// The type used to represent a sequence of constant buffers that refers to /// the underlying memory. typedef ASIO_CONST_BUFFER const_buffers_type; /// The type used to represent a sequence of mutable buffers that refers to /// the underlying memory. 
typedef ASIO_MUTABLE_BUFFER mutable_buffers_type; /// Construct a dynamic buffer from a vector. /** * @param v The vector to be used as backing storage for the dynamic buffer. * The object stores a reference to the vector and the user is responsible * for ensuring that the vector object remains valid while the * dynamic_vector_buffer object, and copies of the object, are in use. * * @param maximum_size Specifies a maximum size for the buffer, in bytes. */ explicit dynamic_vector_buffer(std::vector& v, std::size_t maximum_size = (std::numeric_limits::max)()) ASIO_NOEXCEPT : vector_(v), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_((std::numeric_limits::max)()), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(maximum_size) { } /// @b DynamicBuffer_v2: Copy construct a dynamic buffer. dynamic_vector_buffer(const dynamic_vector_buffer& other) ASIO_NOEXCEPT : vector_(other.vector_), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_(other.size_), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(other.max_size_) { } #if defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// Move construct a dynamic buffer. dynamic_vector_buffer(dynamic_vector_buffer&& other) ASIO_NOEXCEPT : vector_(other.vector_), #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) size_(other.size_), #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) max_size_(other.max_size_) { } #endif // defined(ASIO_HAS_MOVE) || defined(GENERATING_DOCUMENTATION) /// @b DynamicBuffer_v1: Get the size of the input sequence. /// @b DynamicBuffer_v2: Get the current size of the underlying memory. /** * @returns @b DynamicBuffer_v1 The current size of the input sequence. * @b DynamicBuffer_v2: The current size of the underlying vector if less than * max_size(). Otherwise returns max_size(). 
*/ std::size_t size() const ASIO_NOEXCEPT { #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) if (size_ != (std::numeric_limits::max)()) return size_; #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) return (std::min)(vector_.size(), max_size()); } /// Get the maximum size of the dynamic buffer. /** * @returns @b DynamicBuffer_v1: The allowed maximum of the sum of the sizes * of the input sequence and output sequence. @b DynamicBuffer_v2: The allowed * maximum size of the underlying memory. */ std::size_t max_size() const ASIO_NOEXCEPT { return max_size_; } /// Get the maximum size that the buffer may grow to without triggering /// reallocation. /** * @returns @b DynamicBuffer_v1: The current total capacity of the buffer, * i.e. for both the input sequence and output sequence. @b DynamicBuffer_v2: * The current capacity of the underlying vector if less than max_size(). * Otherwise returns max_size(). */ std::size_t capacity() const ASIO_NOEXCEPT { return (std::min)(vector_.capacity(), max_size()); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v1: Get a list of buffers that represents the input /// sequence. /** * @returns An object of type @c const_buffers_type that satisfies * ConstBufferSequence requirements, representing the vector memory in the * input sequence. * * @note The returned object is invalidated by any @c dynamic_vector_buffer * or @c vector member function that modifies the input sequence or output * sequence. */ const_buffers_type data() const ASIO_NOEXCEPT { return const_buffers_type(asio::buffer(vector_, size_)); } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the /// underlying memory. /** * @param pos Position of the first byte to represent in the buffer sequence * * @param n The number of bytes to return in the buffer sequence. If the * underlying memory is shorter, the buffer sequence represents as many bytes * as are available. 
* * @returns An object of type @c mutable_buffers_type that satisfies * MutableBufferSequence requirements, representing the vector memory. * * @note The returned object is invalidated by any @c dynamic_vector_buffer * or @c vector member function that resizes or erases the vector. */ mutable_buffers_type data(std::size_t pos, std::size_t n) ASIO_NOEXCEPT { return mutable_buffers_type(asio::buffer( asio::buffer(vector_, max_size_) + pos, n)); } /// @b DynamicBuffer_v2: Get a sequence of buffers that represents the /// underlying memory. /** * @param pos Position of the first byte to represent in the buffer sequence * * @param n The number of bytes to return in the buffer sequence. If the * underlying memory is shorter, the buffer sequence represents as many bytes * as are available. * * @note The returned object is invalidated by any @c dynamic_vector_buffer * or @c vector member function that resizes or erases the vector. */ const_buffers_type data(std::size_t pos, std::size_t n) const ASIO_NOEXCEPT { return const_buffers_type(asio::buffer( asio::buffer(vector_, max_size_) + pos, n)); } #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v1: Get a list of buffers that represents the output /// sequence, with the given size. /** * Ensures that the output sequence can accommodate @c n bytes, resizing the * vector object as necessary. * * @returns An object of type @c mutable_buffers_type that satisfies * MutableBufferSequence requirements, representing vector memory at the * start of the output sequence of size @c n. * * @throws std::length_error If size() + n > max_size(). * * @note The returned object is invalidated by any @c dynamic_vector_buffer * or @c vector member function that modifies the input sequence or output * sequence. 
*/ mutable_buffers_type prepare(std::size_t n) { if (size () > max_size() || max_size() - size() < n) { std::length_error ex("dynamic_vector_buffer too long"); asio::detail::throw_exception(ex); } if (size_ == (std::numeric_limits::max)()) size_ = vector_.size(); // Enable v1 behaviour. vector_.resize(size_ + n); return asio::buffer(asio::buffer(vector_) + size_, n); } /// @b DynamicBuffer_v1: Move bytes from the output sequence to the input /// sequence. /** * @param n The number of bytes to append from the start of the output * sequence to the end of the input sequence. The remainder of the output * sequence is discarded. * * Requires a preceding call prepare(x) where x >= n, and * no intervening operations that modify the input or output sequence. * * @note If @c n is greater than the size of the output sequence, the entire * output sequence is moved to the input sequence and no error is issued. */ void commit(std::size_t n) { size_ += (std::min)(n, vector_.size() - size_); vector_.resize(size_); } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// @b DynamicBuffer_v2: Grow the underlying memory by the specified number of /// bytes. /** * Resizes the vector to accommodate an additional @c n bytes at the end. * * @throws std::length_error If size() + n > max_size(). */ void grow(std::size_t n) { if (size() > max_size() || max_size() - size() < n) { std::length_error ex("dynamic_vector_buffer too long"); asio::detail::throw_exception(ex); } vector_.resize(size() + n); } /// @b DynamicBuffer_v2: Shrink the underlying memory by the specified number /// of bytes. /** * Erases @c n bytes from the end of the vector by resizing the vector * object. If @c n is greater than the current size of the vector, the vector * is emptied. */ void shrink(std::size_t n) { vector_.resize(n > size() ? 0 : size() - n); } /// @b DynamicBuffer_v1: Remove characters from the input sequence. 
/// @b DynamicBuffer_v2: Consume the specified number of bytes from the /// beginning of the underlying memory. /** * @b DynamicBuffer_v1: Removes @c n characters from the beginning of the * input sequence. @note If @c n is greater than the size of the input * sequence, the entire input sequence is consumed and no error is issued. * * @b DynamicBuffer_v2: Erases @c n bytes from the beginning of the vector. * If @c n is greater than the current size of the vector, the vector is * emptied. */ void consume(std::size_t n) { #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) if (size_ != (std::numeric_limits::max)()) { std::size_t consume_length = (std::min)(n, size_); vector_.erase(vector_.begin(), vector_.begin() + consume_length); size_ -= consume_length; return; } #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) vector_.erase(vector_.begin(), vector_.begin() + (std::min)(size(), n)); } private: std::vector& vector_; #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) std::size_t size_; #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) const std::size_t max_size_; }; /** @defgroup dynamic_buffer asio::dynamic_buffer * * @brief The asio::dynamic_buffer function is used to create a * dynamically resized buffer from a @c std::basic_string or @c std::vector. */ /*@{*/ /// Create a new dynamic buffer that represents the given string. /** * @returns dynamic_string_buffer(data). */ template inline dynamic_string_buffer dynamic_buffer( std::basic_string& data) ASIO_NOEXCEPT { return dynamic_string_buffer(data); } /// Create a new dynamic buffer that represents the given string. /** * @returns dynamic_string_buffer(data, * max_size). */ template inline dynamic_string_buffer dynamic_buffer( std::basic_string& data, std::size_t max_size) ASIO_NOEXCEPT { return dynamic_string_buffer(data, max_size); } /// Create a new dynamic buffer that represents the given vector. /** * @returns dynamic_vector_buffer(data). 
*/ template inline dynamic_vector_buffer dynamic_buffer( std::vector& data) ASIO_NOEXCEPT { return dynamic_vector_buffer(data); } /// Create a new dynamic buffer that represents the given vector. /** * @returns dynamic_vector_buffer(data, max_size). */ template inline dynamic_vector_buffer dynamic_buffer( std::vector& data, std::size_t max_size) ASIO_NOEXCEPT { return dynamic_vector_buffer(data, max_size); } /*@}*/ /** @defgroup buffer_copy asio::buffer_copy * * @brief The asio::buffer_copy function is used to copy bytes from a * source buffer (or buffer sequence) to a target buffer (or buffer sequence). * * The @c buffer_copy function is available in two forms: * * @li A 2-argument form: @c buffer_copy(target, source) * * @li A 3-argument form: @c buffer_copy(target, source, max_bytes_to_copy) * * Both forms return the number of bytes actually copied. The number of bytes * copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c If specified, @c max_bytes_to_copy. * * This prevents buffer overflow, regardless of the buffer sizes used in the * copy operation. * * Note that @ref buffer_copy is implemented in terms of @c memcpy, and * consequently it cannot be used to copy between overlapping memory regions. */ /*@{*/ namespace detail { inline std::size_t buffer_copy_1(const mutable_buffer& target, const const_buffer& source) { using namespace std; // For memcpy. std::size_t target_size = target.size(); std::size_t source_size = source.size(); std::size_t n = target_size < source_size ? 
target_size : source_size; if (n > 0) memcpy(target.data(), source.data(), n); return n; } template inline std::size_t buffer_copy(one_buffer, one_buffer, TargetIterator target_begin, TargetIterator, SourceIterator source_begin, SourceIterator) ASIO_NOEXCEPT { return (buffer_copy_1)(*target_begin, *source_begin); } template inline std::size_t buffer_copy(one_buffer, one_buffer, TargetIterator target_begin, TargetIterator, SourceIterator source_begin, SourceIterator, std::size_t max_bytes_to_copy) ASIO_NOEXCEPT { return (buffer_copy_1)(*target_begin, asio::buffer(*source_begin, max_bytes_to_copy)); } template std::size_t buffer_copy(one_buffer, multiple_buffers, TargetIterator target_begin, TargetIterator, SourceIterator source_begin, SourceIterator source_end, std::size_t max_bytes_to_copy = (std::numeric_limits::max)()) ASIO_NOEXCEPT { std::size_t total_bytes_copied = 0; SourceIterator source_iter = source_begin; for (mutable_buffer target_buffer( asio::buffer(*target_begin, max_bytes_to_copy)); target_buffer.size() && source_iter != source_end; ++source_iter) { const_buffer source_buffer(*source_iter); std::size_t bytes_copied = (buffer_copy_1)(target_buffer, source_buffer); total_bytes_copied += bytes_copied; target_buffer += bytes_copied; } return total_bytes_copied; } template std::size_t buffer_copy(multiple_buffers, one_buffer, TargetIterator target_begin, TargetIterator target_end, SourceIterator source_begin, SourceIterator, std::size_t max_bytes_to_copy = (std::numeric_limits::max)()) ASIO_NOEXCEPT { std::size_t total_bytes_copied = 0; TargetIterator target_iter = target_begin; for (const_buffer source_buffer( asio::buffer(*source_begin, max_bytes_to_copy)); source_buffer.size() && target_iter != target_end; ++target_iter) { mutable_buffer target_buffer(*target_iter); std::size_t bytes_copied = (buffer_copy_1)(target_buffer, source_buffer); total_bytes_copied += bytes_copied; source_buffer += bytes_copied; } return total_bytes_copied; } template 
std::size_t buffer_copy(multiple_buffers, multiple_buffers, TargetIterator target_begin, TargetIterator target_end, SourceIterator source_begin, SourceIterator source_end) ASIO_NOEXCEPT { std::size_t total_bytes_copied = 0; TargetIterator target_iter = target_begin; std::size_t target_buffer_offset = 0; SourceIterator source_iter = source_begin; std::size_t source_buffer_offset = 0; while (target_iter != target_end && source_iter != source_end) { mutable_buffer target_buffer = mutable_buffer(*target_iter) + target_buffer_offset; const_buffer source_buffer = const_buffer(*source_iter) + source_buffer_offset; std::size_t bytes_copied = (buffer_copy_1)(target_buffer, source_buffer); total_bytes_copied += bytes_copied; if (bytes_copied == target_buffer.size()) { ++target_iter; target_buffer_offset = 0; } else target_buffer_offset += bytes_copied; if (bytes_copied == source_buffer.size()) { ++source_iter; source_buffer_offset = 0; } else source_buffer_offset += bytes_copied; } return total_bytes_copied; } template std::size_t buffer_copy(multiple_buffers, multiple_buffers, TargetIterator target_begin, TargetIterator target_end, SourceIterator source_begin, SourceIterator source_end, std::size_t max_bytes_to_copy) ASIO_NOEXCEPT { std::size_t total_bytes_copied = 0; TargetIterator target_iter = target_begin; std::size_t target_buffer_offset = 0; SourceIterator source_iter = source_begin; std::size_t source_buffer_offset = 0; while (total_bytes_copied != max_bytes_to_copy && target_iter != target_end && source_iter != source_end) { mutable_buffer target_buffer = mutable_buffer(*target_iter) + target_buffer_offset; const_buffer source_buffer = const_buffer(*source_iter) + source_buffer_offset; std::size_t bytes_copied = (buffer_copy_1)( target_buffer, asio::buffer(source_buffer, max_bytes_to_copy - total_bytes_copied)); total_bytes_copied += bytes_copied; if (bytes_copied == target_buffer.size()) { ++target_iter; target_buffer_offset = 0; } else target_buffer_offset += 
bytes_copied; if (bytes_copied == source_buffer.size()) { ++source_iter; source_buffer_offset = 0; } else source_buffer_offset += bytes_copied; } return total_bytes_copied; } } // namespace detail /// Copies bytes from a source buffer sequence to a target buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. */ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const ConstBufferSequence& source) ASIO_NOEXCEPT { return detail::buffer_copy( detail::buffer_sequence_cardinality(), detail::buffer_sequence_cardinality(), asio::buffer_sequence_begin(target), asio::buffer_sequence_end(target), asio::buffer_sequence_begin(source), asio::buffer_sequence_end(source)); } /// Copies a limited number of bytes from a source buffer sequence to a target /// buffer sequence. /** * @param target A modifiable buffer sequence representing the memory regions to * which the bytes will be copied. * * @param source A non-modifiable buffer sequence representing the memory * regions from which the bytes will be copied. * * @param max_bytes_to_copy The maximum number of bytes to be copied. * * @returns The number of bytes copied. * * @note The number of bytes copied is the lesser of: * * @li @c buffer_size(target) * * @li @c buffer_size(source) * * @li @c max_bytes_to_copy * * This function is implemented in terms of @c memcpy, and consequently it * cannot be used to copy between overlapping memory regions. 
*/ template inline std::size_t buffer_copy(const MutableBufferSequence& target, const ConstBufferSequence& source, std::size_t max_bytes_to_copy) ASIO_NOEXCEPT { return detail::buffer_copy( detail::buffer_sequence_cardinality(), detail::buffer_sequence_cardinality(), asio::buffer_sequence_begin(target), asio::buffer_sequence_end(target), asio::buffer_sequence_begin(source), asio::buffer_sequence_end(source), max_bytes_to_copy); } /*@}*/ } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/detail/is_buffer_sequence.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// Trait to determine whether a type satisfies the MutableBufferSequence /// requirements. template struct is_mutable_buffer_sequence #if defined(GENERATING_DOCUMENTATION) : integral_constant #else // defined(GENERATING_DOCUMENTATION) : asio::detail::is_buffer_sequence #endif // defined(GENERATING_DOCUMENTATION) { }; /// Trait to determine whether a type satisfies the ConstBufferSequence /// requirements. template struct is_const_buffer_sequence #if defined(GENERATING_DOCUMENTATION) : integral_constant #else // defined(GENERATING_DOCUMENTATION) : asio::detail::is_buffer_sequence #endif // defined(GENERATING_DOCUMENTATION) { }; #if !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Trait to determine whether a type satisfies the DynamicBuffer_v1 /// requirements. template struct is_dynamic_buffer_v1 #if defined(GENERATING_DOCUMENTATION) : integral_constant #else // defined(GENERATING_DOCUMENTATION) : asio::detail::is_dynamic_buffer_v1 #endif // defined(GENERATING_DOCUMENTATION) { }; #endif // !defined(ASIO_NO_DYNAMIC_BUFFER_V1) /// Trait to determine whether a type satisfies the DynamicBuffer_v2 /// requirements. 
template struct is_dynamic_buffer_v2 #if defined(GENERATING_DOCUMENTATION) : integral_constant #else // defined(GENERATING_DOCUMENTATION) : asio::detail::is_dynamic_buffer_v2 #endif // defined(GENERATING_DOCUMENTATION) { }; /// Trait to determine whether a type satisfies the DynamicBuffer requirements. /** * If @c ASIO_NO_DYNAMIC_BUFFER_V1 is not defined, determines whether the * type satisfies the DynamicBuffer_v1 requirements. Otherwise, if @c * ASIO_NO_DYNAMIC_BUFFER_V1 is defined, determines whether the type * satisfies the DynamicBuffer_v2 requirements. */ template struct is_dynamic_buffer #if defined(GENERATING_DOCUMENTATION) : integral_constant #elif defined(ASIO_NO_DYNAMIC_BUFFER_V1) : asio::is_dynamic_buffer_v2 #else // defined(ASIO_NO_DYNAMIC_BUFFER_V1) : asio::is_dynamic_buffer_v1 #endif // defined(ASIO_NO_DYNAMIC_BUFFER_V1) { }; } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_BUFFER_HPP galera-4-26.4.25/asio/asio/placeholders.hpp000644 000164 177776 00000007513 15107057155 021566 0ustar00jenkinsnogroup000000 000000 // // placeholders.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_PLACEHOLDERS_HPP #define ASIO_PLACEHOLDERS_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #if defined(ASIO_HAS_BOOST_BIND) # include #endif // defined(ASIO_HAS_BOOST_BIND) #include "asio/detail/push_options.hpp" namespace asio { namespace placeholders { #if defined(GENERATING_DOCUMENTATION) /// An argument placeholder, for use with boost::bind(), that corresponds to /// the error argument of a handler for any of the asynchronous functions. 
unspecified error; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the bytes_transferred argument of a handler for asynchronous functions such /// as asio::basic_stream_socket::async_write_some or /// asio::async_write. unspecified bytes_transferred; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the iterator argument of a handler for asynchronous functions such as /// asio::async_connect. unspecified iterator; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the results argument of a handler for asynchronous functions such as /// asio::basic_resolver::async_resolve. unspecified results; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the results argument of a handler for asynchronous functions such as /// asio::async_connect. unspecified endpoint; /// An argument placeholder, for use with boost::bind(), that corresponds to /// the signal_number argument of a handler for asynchronous functions such as /// asio::signal_set::async_wait. 
unspecified signal_number; #elif defined(ASIO_HAS_BOOST_BIND) # if defined(__BORLANDC__) || defined(__GNUC__) inline boost::arg<1> error() { return boost::arg<1>(); } inline boost::arg<2> bytes_transferred() { return boost::arg<2>(); } inline boost::arg<2> iterator() { return boost::arg<2>(); } inline boost::arg<2> results() { return boost::arg<2>(); } inline boost::arg<2> endpoint() { return boost::arg<2>(); } inline boost::arg<2> signal_number() { return boost::arg<2>(); } # else namespace detail { template struct placeholder { static boost::arg& get() { static boost::arg result; return result; } }; } # if defined(ASIO_MSVC) && (ASIO_MSVC < 1400) static boost::arg<1>& error = asio::placeholders::detail::placeholder<1>::get(); static boost::arg<2>& bytes_transferred = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& iterator = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& results = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& endpoint = asio::placeholders::detail::placeholder<2>::get(); static boost::arg<2>& signal_number = asio::placeholders::detail::placeholder<2>::get(); # else namespace { boost::arg<1>& error = asio::placeholders::detail::placeholder<1>::get(); boost::arg<2>& bytes_transferred = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& iterator = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& results = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& endpoint = asio::placeholders::detail::placeholder<2>::get(); boost::arg<2>& signal_number = asio::placeholders::detail::placeholder<2>::get(); } // namespace # endif # endif #endif } // namespace placeholders } // namespace asio #include "asio/detail/pop_options.hpp" #endif // ASIO_PLACEHOLDERS_HPP galera-4-26.4.25/asio/asio/thread_pool.hpp000644 000164 177776 00000016230 15107057155 021415 0ustar00jenkinsnogroup000000 000000 // // thread_pool.hpp // ~~~~~~~~~~~~~~~ // // 
Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_THREAD_POOL_HPP #define ASIO_THREAD_POOL_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/detail/config.hpp" #include "asio/detail/noncopyable.hpp" #include "asio/detail/scheduler.hpp" #include "asio/detail/thread_group.hpp" #include "asio/execution_context.hpp" #include "asio/detail/push_options.hpp" namespace asio { /// A simple fixed-size thread pool. /** * The thread pool class is an execution context where functions are permitted * to run on one of a fixed number of threads. * * @par Submitting tasks to the pool * * To submit functions to the thread_pool, use the @ref asio::dispatch, * @ref asio::post or @ref asio::defer free functions. * * For example: * * @code void my_task() * { * ... * } * * ... * * // Launch the pool with four threads. * asio::thread_pool pool(4); * * // Submit a function to the pool. * asio::post(pool, my_task); * * // Submit a lambda object to the pool. * asio::post(pool, * []() * { * ... * }); * * // Wait for all tasks in the pool to complete. * pool.join(); @endcode */ class thread_pool : public execution_context { public: class executor_type; /// Constructs a pool with an automatically determined number of threads. ASIO_DECL thread_pool(); /// Constructs a pool with a specified number of threads. ASIO_DECL thread_pool(std::size_t num_threads); /// Destructor. /** * Automatically stops and joins the pool, if not explicitly done beforehand. */ ASIO_DECL ~thread_pool(); /// Obtains the executor associated with the pool. executor_type get_executor() ASIO_NOEXCEPT; /// Stops the threads. /** * This function stops the threads as soon as possible. 
As a result of calling * @c stop(), pending function objects may be never be invoked. */ ASIO_DECL void stop(); /// Joins the threads. /** * This function blocks until the threads in the pool have completed. If @c * stop() is not called prior to @c join(), the @c join() call will wait * until the pool has no more outstanding work. */ ASIO_DECL void join(); private: friend class executor_type; struct thread_function; // Helper function to create the underlying scheduler. ASIO_DECL detail::scheduler& add_scheduler(detail::scheduler* s); // The underlying scheduler. detail::scheduler& scheduler_; // The threads in the pool. detail::thread_group threads_; }; /// Executor used to submit functions to a thread pool. class thread_pool::executor_type { public: /// Obtain the underlying execution context. thread_pool& context() const ASIO_NOEXCEPT; /// Inform the thread pool that it has some outstanding work to do. /** * This function is used to inform the thread pool that some work has begun. * This ensures that the thread pool's join() function will not return while * the work is underway. */ void on_work_started() const ASIO_NOEXCEPT; /// Inform the thread pool that some work is no longer outstanding. /** * This function is used to inform the thread pool that some work has * finished. Once the count of unfinished work reaches zero, the thread * pool's join() function is permitted to exit. */ void on_work_finished() const ASIO_NOEXCEPT; /// Request the thread pool to invoke the given function object. /** * This function is used to ask the thread pool to execute the given function * object. If the current thread belongs to the pool, @c dispatch() executes * the function before returning. Otherwise, the function will be scheduled * to run on the thread pool. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. 
The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void dispatch(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the thread pool to invoke the given function object. /** * This function is used to ask the thread pool to execute the given function * object. The function object will never be executed inside @c post(). * Instead, it will be scheduled to run on the thread pool. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void post(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Request the thread pool to invoke the given function object. /** * This function is used to ask the thread pool to execute the given function * object. The function object will never be executed inside @c defer(). * Instead, it will be scheduled to run on the thread pool. * * If the current thread belongs to the thread pool, @c defer() will delay * scheduling the function object until the current thread returns control to * the pool. * * @param f The function object to be called. The executor will make * a copy of the handler object as required. The function signature of the * function object must be: @code void function(); @endcode * * @param a An allocator that may be used by the executor to allocate the * internal storage needed for function invocation. */ template void defer(ASIO_MOVE_ARG(Function) f, const Allocator& a) const; /// Determine whether the thread pool is running in the current thread. /** * @return @c true if the current thread belongs to the pool. Otherwise * returns @c false. 
*/ bool running_in_this_thread() const ASIO_NOEXCEPT; /// Compare two executors for equality. /** * Two executors are equal if they refer to the same underlying thread pool. */ friend bool operator==(const executor_type& a, const executor_type& b) ASIO_NOEXCEPT { return &a.pool_ == &b.pool_; } /// Compare two executors for inequality. /** * Two executors are equal if they refer to the same underlying thread pool. */ friend bool operator!=(const executor_type& a, const executor_type& b) ASIO_NOEXCEPT { return &a.pool_ != &b.pool_; } private: friend class thread_pool; // Constructor. explicit executor_type(thread_pool& p) : pool_(p) {} // The underlying thread pool. thread_pool& pool_; }; } // namespace asio #include "asio/detail/pop_options.hpp" #include "asio/impl/thread_pool.hpp" #if defined(ASIO_HEADER_ONLY) # include "asio/impl/thread_pool.ipp" #endif // defined(ASIO_HEADER_ONLY) #endif // ASIO_THREAD_POOL_HPP galera-4-26.4.25/asio/LICENSE_1_0.txt000644 000164 177776 00000002472 15107057155 017736 0ustar00jenkinsnogroup000000 000000 Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. galera-4-26.4.25/asio/asio.hpp000644 000164 177776 00000012020 15107057155 017106 0ustar00jenkinsnogroup000000 000000 // // asio.hpp // ~~~~~~~~ // // Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef ASIO_HPP #define ASIO_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include "asio/associated_allocator.hpp" #include "asio/associated_executor.hpp" #include "asio/async_result.hpp" #include "asio/awaitable.hpp" #include "asio/basic_datagram_socket.hpp" #include "asio/basic_deadline_timer.hpp" #include "asio/basic_io_object.hpp" #include "asio/basic_raw_socket.hpp" #include "asio/basic_seq_packet_socket.hpp" #include "asio/basic_serial_port.hpp" #include "asio/basic_signal_set.hpp" #include "asio/basic_socket.hpp" #include "asio/basic_socket_acceptor.hpp" #include "asio/basic_socket_iostream.hpp" #include "asio/basic_socket_streambuf.hpp" #include "asio/basic_stream_socket.hpp" #include "asio/basic_streambuf.hpp" #include "asio/basic_waitable_timer.hpp" #include "asio/bind_executor.hpp" #include "asio/buffer.hpp" #include "asio/buffered_read_stream_fwd.hpp" #include "asio/buffered_read_stream.hpp" #include "asio/buffered_stream_fwd.hpp" #include "asio/buffered_stream.hpp" #include "asio/buffered_write_stream_fwd.hpp" #include 
"asio/buffered_write_stream.hpp" #include "asio/buffers_iterator.hpp" #include "asio/co_spawn.hpp" #include "asio/completion_condition.hpp" #include "asio/compose.hpp" #include "asio/connect.hpp" #include "asio/coroutine.hpp" #include "asio/deadline_timer.hpp" #include "asio/defer.hpp" #include "asio/detached.hpp" #include "asio/dispatch.hpp" #include "asio/error.hpp" #include "asio/error_code.hpp" #include "asio/execution_context.hpp" #include "asio/executor.hpp" #include "asio/executor_work_guard.hpp" #include "asio/generic/basic_endpoint.hpp" #include "asio/generic/datagram_protocol.hpp" #include "asio/generic/raw_protocol.hpp" #include "asio/generic/seq_packet_protocol.hpp" #include "asio/generic/stream_protocol.hpp" #include "asio/handler_alloc_hook.hpp" #include "asio/handler_continuation_hook.hpp" #include "asio/handler_invoke_hook.hpp" #include "asio/high_resolution_timer.hpp" #include "asio/io_context.hpp" #include "asio/io_context_strand.hpp" #include "asio/io_service.hpp" #include "asio/io_service_strand.hpp" #include "asio/ip/address.hpp" #include "asio/ip/address_v4.hpp" #include "asio/ip/address_v4_iterator.hpp" #include "asio/ip/address_v4_range.hpp" #include "asio/ip/address_v6.hpp" #include "asio/ip/address_v6_iterator.hpp" #include "asio/ip/address_v6_range.hpp" #include "asio/ip/network_v4.hpp" #include "asio/ip/network_v6.hpp" #include "asio/ip/bad_address_cast.hpp" #include "asio/ip/basic_endpoint.hpp" #include "asio/ip/basic_resolver.hpp" #include "asio/ip/basic_resolver_entry.hpp" #include "asio/ip/basic_resolver_iterator.hpp" #include "asio/ip/basic_resolver_query.hpp" #include "asio/ip/host_name.hpp" #include "asio/ip/icmp.hpp" #include "asio/ip/multicast.hpp" #include "asio/ip/resolver_base.hpp" #include "asio/ip/resolver_query_base.hpp" #include "asio/ip/tcp.hpp" #include "asio/ip/udp.hpp" #include "asio/ip/unicast.hpp" #include "asio/ip/v6_only.hpp" #include "asio/is_executor.hpp" #include "asio/is_read_buffered.hpp" #include 
"asio/is_write_buffered.hpp" #include "asio/local/basic_endpoint.hpp" #include "asio/local/connect_pair.hpp" #include "asio/local/datagram_protocol.hpp" #include "asio/local/stream_protocol.hpp" #include "asio/packaged_task.hpp" #include "asio/placeholders.hpp" #include "asio/posix/basic_descriptor.hpp" #include "asio/posix/basic_stream_descriptor.hpp" #include "asio/posix/descriptor.hpp" #include "asio/posix/descriptor_base.hpp" #include "asio/posix/stream_descriptor.hpp" #include "asio/post.hpp" #include "asio/read.hpp" #include "asio/read_at.hpp" #include "asio/read_until.hpp" #include "asio/redirect_error.hpp" #include "asio/serial_port.hpp" #include "asio/serial_port_base.hpp" #include "asio/signal_set.hpp" #include "asio/socket_base.hpp" #include "asio/steady_timer.hpp" #include "asio/strand.hpp" #include "asio/streambuf.hpp" #include "asio/system_context.hpp" #include "asio/system_error.hpp" #include "asio/system_executor.hpp" #include "asio/system_timer.hpp" #include "asio/this_coro.hpp" #include "asio/thread.hpp" #include "asio/thread_pool.hpp" #include "asio/time_traits.hpp" #include "asio/use_awaitable.hpp" #include "asio/use_future.hpp" #include "asio/uses_executor.hpp" #include "asio/version.hpp" #include "asio/wait_traits.hpp" #include "asio/windows/basic_object_handle.hpp" #include "asio/windows/basic_overlapped_handle.hpp" #include "asio/windows/basic_random_access_handle.hpp" #include "asio/windows/basic_stream_handle.hpp" #include "asio/windows/object_handle.hpp" #include "asio/windows/overlapped_handle.hpp" #include "asio/windows/overlapped_ptr.hpp" #include "asio/windows/random_access_handle.hpp" #include "asio/windows/stream_handle.hpp" #include "asio/write.hpp" #include "asio/write_at.hpp" #endif // ASIO_HPP galera-4-26.4.25/gcache/000755 000164 177776 00000000000 15107057160 015722 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcache/tests/000755 000164 177776 00000000000 15107057160 017064 5ustar00jenkinsnogroup000000 000000 
galera-4-26.4.25/gcache/tests/gcache_top_test.cpp000644 000164 177776 00000022655 15107057155 022741 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2025 Codership Oy * * $Id$ */ #define GCACHE_UNIT_TEST #define GCACHE_PAGE_STORE_UNIT_TEST #include "GCache.hpp" #include "gcache_limits.hpp" #include "gcache_top_test.hpp" #include #include using namespace gcache; static int const DEBUG = 4; static void test_caching_fill_page(gcache::GCache& gc, std::vector& buf, size_t const expected_page_count) { const PageStore& ps(gc.page_store()); for (int i(0); i < 3; i++) { buf.push_back(gc.malloc(1)); void* const ptr(buf[buf.size() - 1]); ck_assert(nullptr != ptr); ck_assert(SEQNO_NONE == ptr2BH(ptr)->seqno_g); ck_assert_msg(ps.count() == expected_page_count, "%d. buf = %zu, ps.count() = %zu (expected %zu)", i, buf.size()-1, ps.count(), expected_page_count); } } START_TEST(top_level_page_caching) // test that caching in pages work { log_info << "\n#\n# top_level_page_caching\n#"; const char* const dir_name = ""; size_t const bh_size = sizeof(gcache::BufferHeader); size_t const page_size = (8 + bh_size)*3; // fits 3 buffers <= 8 bytes size_t const keep_size = 2*page_size; // keep at least 2 pages gu::Config cfg; GCache::register_params(cfg); cfg.set("gcache.dir", dir_name); cfg.set("gcache.size", 0); // turn off ring buffer cfg.set("gcache.page_size", page_size); cfg.set("gcache.keep_pages_size", keep_size); #ifndef NDEBUG cfg.set("gcache.debug", DEBUG); #endif GCache gc(nullptr, cfg, dir_name); const seqno2ptr_t& sm(gc.seqno_map()); const PageStore& ps(gc.page_store()); ck_assert_msg(ps.page_size() == page_size, "ps.page_size: %zu (expected %zu)", ps.page_size(), page_size); std::vector buf; mark_point(); /* * 1. Populate 6 pages */ for (size_t page_count(1); page_count <= 6; page_count++) test_caching_fill_page(gc, buf, page_count); ck_assert_msg(ps.total_pages() == 6, "total_pages %zu (expected 6)", ps.total_pages()); /* * 2. 
Free some "unused" buffers and assign seqnos out of order to others */ gc.free(buf[0]); gc.free(buf[1]); gc.free(buf[2]); ps.wait_page_discard(); // 1st page should go ck_assert_msg(ps.total_pages() == 5, "total_pages %zu (expected 5)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == -1, "seqno_min: %" PRId64 " (expected -1)", gc.seqno_min()); gc.free(buf[3]); gc.seqno_assign(buf[4], 1, 0, false); gc.seqno_release(1); ck_assert(gc.seqno_min() == 1); gc.free(buf[5]); ps.wait_page_discard(); // 2nd page should go ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 4)", ps.total_pages()); if (!sm.empty()) { ck_assert_msg(sm.empty() == true, "SM size: %zu, begin: %" PRId64 ", end: %" PRId64 ", front: %p, back: %p" ,sm.size(), sm.index_begin(), sm.index_end() ,sm.front(), sm.back() ); } ck_assert_msg(gc.seqno_min() == -1, "seqno_min: %" PRId64 " (expected -1)", gc.seqno_min()); gc.free(buf[6]); gc.seqno_assign(buf[7], 4, 0, false); // this should pin the page gc.free(buf[8]); ck_assert_msg(gc.seqno_min() == 4, "seqno_min: %" PRId64 " (expected 4)", gc.seqno_min()); ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 4)", ps.total_pages()); gc.free(buf[9]); gc.seqno_assign(buf[11], 2, 0, false); gc.seqno_release(2); ck_assert(gc.seqno_min() == 2); gc.seqno_assign(buf[10], 3, 0, false); gc.seqno_release(3); // page 3 should stay since seqno 4 is not released, and therefore page 4 ps.wait_page_discard(); ck_assert(gc.seqno_min() == 2); ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 4)", ps.total_pages()); gc.seqno_release(4); // only after releasing seqno 4 all other buffers and pages 3 and 4 may go ps.wait_page_discard(); ck_assert_msg(ps.total_pages() == 2, "total_pages %zu (expected 2)", ps.total_pages()); ck_assert(gc.seqno_min() == -1); /* * 3. 
Test that the last 2 pages will remain even after freeing */ gc.free(buf[12]); gc.seqno_assign(buf[13], 5, 0, false); gc.seqno_assign(buf[14], 6, 0, false); ck_assert(gc.seqno_min() == 5); gc.seqno_assign(buf[15], 7, 0, false); gc.seqno_assign(buf[16], 8, 0, false); gc.seqno_release(8); // releases all previous seqnos as well gc.free(buf[17]); ps.wait_page_discard(); ck_assert(gc.seqno_min() == 5); ck_assert_msg(ps.total_pages() == 2, "total_pages %zu (expected 2)", ps.total_pages()); buf.push_back(gc.malloc(1)); // this shall allocate one more page and page 5 should go, // only 6 and 7 should remain ck_assert(ps.count() == 7); ps.wait_page_discard(); ck_assert_msg(ps.total_pages() == 2, "total_pages %zu (expected 2)", ps.total_pages()); ck_assert(gc.seqno_min() == 7); for (size_t i(18); i < buf.size(); i++) { gc.free(buf[i]); } mark_point(); } END_TEST START_TEST(top_level_page_caching_locking) // test that caching in pages work { log_info << "\n#\n# top_level_page_caching_locking\n#"; const char* const dir_name = ""; size_t const bh_size = sizeof(gcache::BufferHeader); size_t const page_size = (8 + bh_size)*3; // fits 3 buffers <= 8 bytes size_t const keep_size = 2*page_size; // keep at least 2 pages gu::Config cfg; GCache::register_params(cfg); cfg.set("gcache.dir", dir_name); cfg.set("gcache.size", 0); // turn off ring buffer cfg.set("gcache.page_size", page_size); cfg.set("gcache.keep_pages_size", keep_size); #ifndef NDEBUG cfg.set("gcache.debug", DEBUG); #endif GCache gc(nullptr, cfg, dir_name); const PageStore& ps(gc.page_store()); ck_assert_msg(ps.page_size() == page_size, "ps.page_size: %zu (expected %zu)", ps.page_size(), page_size); std::vector buf; mark_point(); /* * 1. Populate 5 pages */ for (size_t page_count(1); page_count <= 5; page_count++) test_caching_fill_page(gc, buf, page_count); ck_assert_msg(ps.total_pages() == 5, "total_pages %zu (expected 5)", ps.total_pages()); /* * 2. 
Assign seqnos */ for (size_t seqno(1); seqno <= buf.size(); seqno++) gc.seqno_assign(buf[seqno - 1], seqno, 0, false); ck_assert(gc.seqno_min() == 1); /* * 3. Lock seqno 5, it should hold page 2 */ gc.seqno_lock(5); gc.seqno_release(1); gc.seqno_release(2); ps.wait_page_discard(); ck_assert_msg(ps.total_pages() == 5, "total_pages %zu (expected 5)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 1, "seqno_min: %" PRId64 " (expected 1)", gc.seqno_min()); gc.seqno_release(3); ps.wait_page_discard(); // page 1 should go ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 4)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 4, "seqno_min: %" PRId64 " (expected 4)", gc.seqno_min()); try { gc.seqno_lock(3); ck_abort_msg("Should fail."); } catch (gu::NotFound&) {} gc.seqno_release(6); // batch-releases all seqnos <= 6 ps.wait_page_discard(); // page 2 should be held by a lock ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 5)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 4, "seqno_min: %" PRId64 " (expected 4)", gc.seqno_min()); /* * 4. Lock seqno 4 to test that nothing changed */ gc.seqno_lock(4); gc.seqno_release(9); ps.wait_page_discard(); // page 2 should be held by a lock ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 5)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 4, "seqno_min: %" PRId64 " (expected 4)", gc.seqno_min()); /* and release page 4 just to see that it doesn't go below because it is * within the keep_pages_size */ gc.seqno_release(12); /* * 5. Unlock seqnos. 
Only after the second unlock pages should be discarded */ gc.seqno_unlock(); ps.wait_page_discard(); ck_assert_msg(ps.total_pages() == 4, "total_pages %zu (expected 4)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 4, "seqno_min: %" PRId64 " (expected 4)", gc.seqno_min()); gc.seqno_unlock(); ps.wait_page_discard(); // pages 2 and 3 should be discarded ck_assert_msg(ps.total_pages() == 2, "total_pages %zu (expected 2)", ps.total_pages()); ck_assert_msg(gc.seqno_min() == 10, "seqno_min: %" PRId64 " (expected 10)", gc.seqno_min()); for (size_t seqno(10); seqno <= buf.size(); seqno++) { gc.seqno_release(seqno); } mark_point(); } END_TEST Suite* gcache_top_suite() { Suite* s = suite_create("gcache::top-level"); TCase* tc; tc = tcase_create("test"); tcase_add_test(tc, top_level_page_caching); tcase_add_test(tc, top_level_page_caching_locking); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcache/tests/CMakeLists.txt000644 000164 177776 00000000664 15107057155 021636 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020-2025 Codership Oy # add_executable(gcache_tests gcache_mem_test.cpp gcache_page_test.cpp gcache_rb_test.cpp gcache_top_test.cpp gcache_tests.cpp ) # TODO: Fix target_compile_options(gcache_tests PRIVATE -Wno-conversion -Wno-unused-parameter ) target_link_libraries(gcache_tests gcache ${GALERA_UNIT_TEST_LIBS}) add_test( NAME gcache_tests COMMAND gcache_tests ) galera-4-26.4.25/gcache/tests/gcache_rb_test.hpp000644 000164 177776 00000000357 15107057155 022542 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011 Codership Oy * * $Id$ */ #ifndef __gcache_rb_test_hpp__ #define __gcache_rb_test_hpp__ extern "C" { #include } extern Suite* gcache_rb_suite(); #endif // __gcache_rb_test_hpp__ galera-4-26.4.25/gcache/tests/gcache_page_test.cpp000644 000164 177776 00000024270 15107057155 023046 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2025 Codership Oy * * $Id$ */ #define GCACHE_PAGE_STORE_UNIT_TEST #include 
"gcache_page_store.hpp" #include "gcache_bh.hpp" #include "gcache_limits.hpp" #include "gcache_page_test.hpp" #include #include using namespace gcache; static int const DEBUG = 4; // page store debug flag class SeqnoMapStub : public SeqnoMap { public: SeqnoMapStub() : last_seqno_(0), low_limit_(SEQNO_NONE) {} void seqno_discard(const seqno_t& seqno) { assert(seqno > 0); /* make sure set_low_limit() is called before seqno_discard() */ ck_assert_msg(seqno <= low_limit_, "Discard seqno: %" PRId64 ", low_limit: %" PRId64, seqno, low_limit_); if (last_seqno_ < seqno) last_seqno_ = seqno; } void set_low_limit(const seqno_t& seqno) { log_info << "set_low_limit(" << seqno << ")"; low_limit_ = std::max(low_limit_, seqno); } seqno_t low_limit() const { return low_limit_; } seqno_t seqno_discarded() const { return last_seqno_; } private: seqno_t last_seqno_; seqno_t low_limit_; }; static void ps_free (gcache::PageStore& ps, void* ptr) { BufferHeader* const bh(ptr2BH(ptr)); BH_release (bh); ps.free(bh); } START_TEST(test1) { log_info << "\n#\n# test1\n#"; const char* const dir_name = ""; ssize_t const bh_size = sizeof(gcache::BufferHeader); ssize_t const keep_size = 1; ssize_t const page_size = 2 + bh_size; SeqnoMapStub sm; gcache::PageStore ps (sm, dir_name, keep_size, page_size, DEBUG, false); ck_assert_msg(ps.count() == 0,"expected count 0, got %zu",ps.count()); ck_assert_msg(ps.total_pages() == 0,"expected 0 pages, got %zu",ps.total_pages()); ck_assert_msg(ps.total_size() == 0,"expected size 0, got %zu", ps.total_size()); void* buf = ps.malloc (3 + bh_size); ck_assert(0 != buf); ck_assert_msg(ps.count() == 1,"expected count 1, got %zu",ps.count()); ck_assert_msg(ps.total_pages() == 1,"expected 1 pages, got %zu",ps.total_pages()); void* tmp = ps.realloc (buf, 2 + bh_size); ck_assert(buf == tmp); ck_assert_msg(ps.count() == 1,"expected count 1, got %zu",ps.count()); ck_assert_msg(ps.total_pages() == 1,"expected 1 pages, got %zu",ps.total_pages()); tmp = ps.realloc (buf, 4 + 
bh_size); // here new page should be allocated ck_assert(0 != tmp); ck_assert(buf != tmp); ck_assert_msg(ps.count() == 2,"expected count 2, got %zu",ps.count()); ck_assert_msg(ps.total_pages() == 1,"expected 1 pages, got %zu",ps.total_pages()); ps_free(ps, tmp); ck_assert_msg(ps.count() == 2,"expected count 2, got %zu",ps.count()); ps.wait_page_discard(); ck_assert_msg(ps.total_pages() == 0,"expected 0 pages, got %zu",ps.total_pages()); ck_assert_msg(ps.total_size() == 0,"expected size 0, got %zu", ps.total_size()); mark_point(); } END_TEST START_TEST(test2) { log_info << "\n#\n# test2\n#"; const char* const dir_name = ""; ssize_t const bh_size = sizeof(gcache::BufferHeader); ssize_t const keep_size = 1; ssize_t page_size = (1 << 20) + bh_size; SeqnoMapStub sm; gcache::PageStore ps (sm, dir_name, keep_size, page_size, 0, false); mark_point(); uint8_t* buf = static_cast(ps.malloc (page_size)); ck_assert(0 != buf); while (--page_size) { buf[page_size] = page_size; } mark_point(); ps_free(ps, buf); mark_point(); } END_TEST START_TEST(test3) // check that all page size is efficiently used { log_info << "\n#\n# test3\n#"; const char* const dir_name = ""; ssize_t const keep_size = 1; ssize_t const page_size = 1024; SeqnoMapStub sm; { gcache::PageStore ps (sm, dir_name, keep_size, page_size, 0, false); mark_point(); ssize_t ptr_size = (page_size / 2); void* ptr1 = ps.malloc (ptr_size); ck_assert(0 != ptr1); void* ptr2 = ps.malloc (ptr_size); ck_assert(0 != ptr2); ck_assert_msg(ps.count() == 1, "ps.count() = %zd, expected 1", ps.count()); // check that ptr2 is adjacent to ptr1 void* tmp = static_cast(ptr1) + ptr_size; ck_assert_msg(tmp == ptr2, "tmp = %p, ptr2 = %p", tmp, ptr2); ps_free(ps, ptr1); ps_free(ps, ptr2); } mark_point(); } END_TEST static void test_caching_fill_page(gcache::PageStore& ps, std::vector& buf, size_t const expected_page_count) { for (int i(0); i < 3; i++) { buf.push_back(ps.malloc(gcache::Limits::MIN_SIZE)); void* const ptr(buf[buf.size() - 1]); 
ck_assert(nullptr != ptr); ck_assert(SEQNO_NONE == ptr2BH(ptr)->seqno_g); ck_assert_msg(ps.count() == expected_page_count, "buf = %zu, ps.count() = %zu (expected %zu)", buf.size()-1, ps.count(), expected_page_count); } } START_TEST(test_caching) // test that caching in pages work { log_info << "\n#\n# test_caching\n#"; const char* const dir_name = ""; size_t const bh_size = sizeof(gcache::BufferHeader); size_t const page_size = (8 + bh_size)*3; // fits 3 buffers <= 8 bytes size_t const keep_size = 2*page_size; // keep at least 2 pages std::vector buf; SeqnoMapStub sm; gcache::PageStore ps(sm, dir_name, keep_size, page_size, DEBUG, false); mark_point(); /* * 1. Populate 6 pages */ for (size_t page_count(1); page_count <= 6; page_count++) test_caching_fill_page(ps, buf, page_count); ck_assert(6 == ps.total_pages()); ck_assert(18 == buf.size()); /* * 2. Free some "unused" buffers and assign seqnos out of order to * others */ ps_free(ps, buf[0]); ps_free(ps, buf[1]); ps_free(ps, buf[2]); ps.wait_page_discard(); // one page should go ck_assert_msg(5 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ps.wait_page_discard(); ck_assert(sm.low_limit() == 0); ck_assert(sm.seqno_discarded() == 0); ps_free(ps, buf[3]); ps.seqno_assign(ptr2BH(buf[4]), 1); ps_free(ps, buf[4]); ps_free(ps, buf[5]); ps.wait_page_discard(); // one page should go ck_assert_msg(4 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() == 1); ps_free(ps, buf[6]); ps.seqno_assign(ptr2BH(buf[7]), 4); // this should pin the page ps_free(ps, buf[8]); ps.wait_page_discard(); ck_assert_msg(4 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() 
== 1); ps_free(ps, buf[9]); ps.seqno_assign(ptr2BH(buf[11]), 2); ps.seqno_assign(ptr2BH(buf[10]), 3); ps_free(ps, buf[10]); ps.wait_page_discard(); ck_assert_msg(4 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ps_free(ps, buf[11]); ps.wait_page_discard(); ck_assert_msg(4 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ps_free(ps, buf[7]); ps.wait_page_discard(); // this should free 2 pages at once ck_assert_msg(2 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() == 4); /* * 3. Test that the last 2 pages will remain even after freeing */ ps_free(ps, buf[12]); ps.seqno_assign(ptr2BH(buf[13]), 5); ps_free(ps, buf[13]); ps.seqno_assign(ptr2BH(buf[14]), 6); ps_free(ps, buf[14]); ps.wait_page_discard(); // all pages should stay ck_assert_msg(2 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() == 4); ps.seqno_assign(ptr2BH(buf[15]), 7); ps_free(ps, buf[15]); ps.seqno_assign(ptr2BH(buf[16]), 8); ps_free(ps, buf[16]); ps_free(ps, buf[17]); ps.wait_page_discard(); // all pages should stay ck_assert_msg(2 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() == 4); /* * 4. 
Test that new page allocation caused by new malloc() will cause * the oldest page to go */ size_t const old_page_count(ps.count()); buf.push_back(ps.malloc(gcache::Limits::MIN_SIZE)); ck_assert(old_page_count + 1 == ps.count()); // new page was allocated ck_assert_msg(2 == ps.total_pages(), "keep_pages/total_pages = %zu/%zu, " "keep_size/total_size = %zu/%zu", ps.keep_page(), ps.total_pages(), ps.keep_size(), ps.total_size()); ck_assert(sm.low_limit() == 6); for (size_t i(18); i < buf.size(); i++) { ps_free(ps, buf[i]); } mark_point(); } END_TEST Suite* gcache_page_suite() { Suite* s = suite_create("gcache::PageStore"); TCase* tc; tc = tcase_create("test"); tcase_add_test(tc, test1); tcase_add_test(tc, test2); tcase_add_test(tc, test3); tcase_add_test(tc, test_caching); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcache/tests/gcache_top_test.hpp000644 000164 177776 00000000363 15107057155 022736 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2025 Codership Oy * * $Id$ */ #ifndef __gcache_top_test_hpp__ #define __gcache_top_test_hpp__ extern "C" { #include } extern Suite* gcache_top_suite(); #endif // __gcache_top_test_hpp__ galera-4-26.4.25/gcache/tests/gcache_mem_test.hpp000644 000164 177776 00000000363 15107057155 022712 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011 Codership Oy * * $Id$ */ #ifndef __gcache_mem_test_hpp__ #define __gcache_mem_test_hpp__ extern "C" { #include } extern Suite* gcache_mem_suite(); #endif // __gcache_mem_test_hpp__ galera-4-26.4.25/gcache/tests/gcache_tests.hpp000644 000164 177776 00000001063 15107057155 022235 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2025 Codership Oy // $Id$ /*! * @file: package specific part of the main test file. 
*/ #ifndef __gcache_tests_hpp__ #define __gcache_tests_hpp__ #include "gcache_mem_test.hpp" #include "gcache_rb_test.hpp" #include "gcache_page_test.hpp" #include "gcache_top_test.hpp" extern "C" { #include } typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gcache_mem_suite, gcache_rb_suite, gcache_page_suite, gcache_top_suite, 0 }; #endif /* __gcache_tests_hpp__ */ galera-4-26.4.25/gcache/tests/gcache_tests.cpp000644 000164 177776 00000002024 15107057155 022226 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2017 Codership Oy #include #include #include extern "C" { #include } #include "gcache_tests.hpp" #define LOG_FILE "gcache_tests.log" int main(int argc, char* argv[]) { bool no_fork = (argc >= 2 && std::string(argv[1]) == "nofork"); FILE* log_file = 0; if (!no_fork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); int failed = 0; for (int i = 0; suites[i] != 0; ++i) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all(sr, CK_NORMAL); failed += srunner_ntests_failed(sr); srunner_free(sr); } if (log_file != 0) fclose(log_file); printf ("Total tests failed: %d\n", failed); if (0 == failed && 0 != log_file) ::unlink(LOG_FILE); return failed == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-4-26.4.25/gcache/tests/gcache_mem_test.cpp000644 000164 177776 00000003773 15107057155 022715 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011-2020 Codership Oy * * $Id$ */ #include "gcache_mem_store.hpp" #include "gcache_bh.hpp" #include "gcache_mem_test.hpp" using namespace gcache; START_TEST(test1) { ssize_t const bh_size (sizeof(gcache::BufferHeader)); ssize_t const mem_size (3 + 2*bh_size); seqno2ptr_t s2p(SEQNO_NONE); MemStore ms(mem_size, s2p, 0); void* buf1 = ms.malloc (1 + bh_size); ck_assert(NULL != buf1); BufferHeader* bh1(ptr2BH(buf1)); ck_assert(bh1->seqno_g == SEQNO_NONE); ck_assert(!BH_is_released(bh1)); void* buf2 = ms.malloc (1 + bh_size); ck_assert(NULL != buf2); ck_assert(buf1 != buf2); void* buf3 = ms.malloc (1 + bh_size); ck_assert(NULL == buf3); buf1 = ms.realloc (buf1, 2 + bh_size); ck_assert(NULL != buf1); bh1 = ptr2BH(buf1); ck_assert(bh1->seqno_g == SEQNO_NONE); ck_assert(!BH_is_released(bh1)); BufferHeader* bh2(ptr2BH(buf2)); ck_assert(bh2->seqno_g == SEQNO_NONE); ck_assert(!BH_is_released(bh2)); bh2->seqno_g = 1; /* freeing seqno'd buffer should only release it, but not discard */ BH_release(bh2); ms.free (bh2); ck_assert(BH_is_released(bh2)); buf3 = ms.malloc (1 + bh_size); ck_assert(NULL == buf3); /* discarding a buffer should finally free some space for another */ ms.discard(bh2); buf3 = ms.malloc (1 + bh_size); ck_assert(NULL != buf3); /* freeing unseqno'd buffer should free space immeditely */ bh1 = ptr2BH(buf1); BH_release(bh1); ms.free (bh1); void* buf4 = ms.malloc (2 + bh_size); ck_assert(NULL != buf4); BufferHeader* bh3(ptr2BH(buf3)); BH_release(bh3); ms.free (bh3); BufferHeader* bh4(ptr2BH(buf4)); BH_release(bh4); ms.free (bh4); ck_assert(!ms._allocd()); } END_TEST Suite* gcache_mem_suite() { Suite* s = suite_create("gcache::MemStore"); TCase* tc; tc = tcase_create("test"); tcase_add_test(tc, test1); suite_add_tcase(s, tc); return s; } 
galera-4-26.4.25/gcache/tests/SConscript000644 000164 177776 00000001451 15107057155 021103 0ustar00jenkinsnogroup000000 000000 Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' #/common #/gcache/src #/galerautils/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) env.Prepend(LIBS=File('#/gcache/src/libgcache.a')) gcache_tests = env.Program(target = 'gcache_tests', source = Glob('*.cpp')) # source = Split(''' # gcache_tests.cpp # ''')) stamp="gcache_tests.passed" env.Test(stamp, gcache_tests) env.Alias("test", stamp) Clean(gcache_tests, ['#/gcache_tests.log', '#/gcache.page.000000', '#/rb_test']) galera-4-26.4.25/gcache/tests/gcache_rb_test.cpp000644 000164 177776 00000035104 15107057155 022533 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011-2021 Codership Oy * * $Id$ */ #define GCACHE_RB_UNIT_TEST #include "gcache_rb_store.hpp" #include "gcache_bh.hpp" #include "gcache_rb_test.hpp" #include #include using namespace gcache; static gu::UUID const GID(NULL, 0); static std::string const RB_NAME("rb_test"); static size_t const BH_SIZE(sizeof(gcache::BufferHeader)); typedef MemOps::size_type size_type; static size_type ALLOC_SIZE(size_type s) { return MemOps::align_size(s + BH_SIZE); } START_TEST(test1) { ::unlink(RB_NAME.c_str()); size_t const rb_size(ALLOC_SIZE(2) * 2); seqno2ptr_t s2p(SEQNO_NONE); gu::UUID gid(GID); RingBuffer rb(NULL, RB_NAME, rb_size, s2p, gid, 0, false); ck_assert_msg(rb.size() == rb_size, "Expected %zd, got %zd", rb_size, rb.size()); if (gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << gid; ck_abort_msg("%s", os.str().c_str()); } void* buf1 = rb.malloc (MemOps::align_size(rb_size/2 + 1)); ck_assert(NULL == buf1); // > 1/2 size buf1 = rb.malloc (ALLOC_SIZE(1)); ck_assert(NULL != buf1); BufferHeader* bh1(ptr2BH(buf1)); ck_assert(bh1->seqno_g == SEQNO_NONE); 
ck_assert(!BH_is_released(bh1)); void* buf2 = rb.malloc (ALLOC_SIZE(2)); ck_assert(NULL != buf2); ck_assert(!BH_is_released(bh1)); BufferHeader* bh2(ptr2BH(buf2)); ck_assert(bh2->seqno_g == SEQNO_NONE); ck_assert(!BH_is_released(bh2)); void* tmp = rb.realloc (buf1, ALLOC_SIZE(2)); // anything <= MemOps::ALIGNMENT should fit into original buffer ck_assert(tmp == buf1 && MemOps::ALIGNMENT > 1); tmp = rb.realloc (buf1, ALLOC_SIZE(MemOps::ALIGNMENT + 1)); // should require new buffer for which there's no space ck_assert(bh2->seqno_g == SEQNO_NONE); ck_assert(NULL == tmp); BH_release(bh2); rb.free (bh2); tmp = rb.realloc (buf1, ALLOC_SIZE(3)); if (MemOps::ALIGNMENT > 2) { ck_assert(NULL != tmp); ck_assert(buf1 == tmp); } else { ck_assert(NULL == tmp); } BH_release(bh1); rb.free (bh1); ck_assert(BH_is_released(bh1)); buf1 = rb.malloc(ALLOC_SIZE(1)); ck_assert(NULL != buf1); tmp = rb.realloc (buf1, ALLOC_SIZE(2)); ck_assert(NULL != tmp); ck_assert(tmp == buf1); buf2 = rb.malloc (ALLOC_SIZE(1)); ck_assert(NULL != buf2); tmp = rb.realloc (buf2, ALLOC_SIZE(2)); ck_assert(NULL != tmp); ck_assert(tmp == buf2); tmp = rb.malloc (ALLOC_SIZE(1)); ck_assert(NULL == tmp); BH_release(ptr2BH(buf1)); rb.free(ptr2BH(buf1)); BH_release(ptr2BH(buf2)); rb.free(ptr2BH(buf2)); tmp = rb.malloc (ALLOC_SIZE(2)); ck_assert(NULL != tmp); mark_point(); } END_TEST START_TEST(recovery) { struct msg { char msg; seqno_t g; seqno_t d; size_t size() const { return sizeof(msg); } }; #define MAX_MSGS 10 struct msg msgs[MAX_MSGS] = { { '0', 1, 0 }, { '1', 2, 0 }, { '2', 4, 1 }, { '3', SEQNO_ILL, SEQNO_ILL }, { '4', 3, 1 }, { '5', SEQNO_ILL, SEQNO_ILL }, { '6', 5, SEQNO_ILL }, { '7', SEQNO_ILL, SEQNO_ILL }, { '8', 6, 4 }, { '9', 7, 4 } }; size_type const msg_size(ALLOC_SIZE(sizeof(reinterpret_cast(0)->msg))); struct rb_ctx { size_t const size; seqno2ptr_t s2p; gu::UUID gid; RingBuffer rb; rb_ctx(size_t s, bool recover = true) : size(s), s2p(SEQNO_NONE), gid(GID), rb(NULL, RB_NAME, size, s2p, gid, 0, 
recover) {} void seqno_assign (seqno2ptr_t& s2p, void* const ptr, seqno_t const g, seqno_t const d) { s2p.insert(g, ptr); BufferHeader* bh(ptr2BH(ptr)); bh->seqno_g = g; if (d < 0) bh->flags |= BUFFER_SKIPPED; } void* add_msg(struct msg& m) { void* ret(rb.malloc(ALLOC_SIZE(m.size()))); if (ret) { ::memcpy(ret, &m.msg, m.size()); if (m.g > 0) seqno_assign(s2p, ret, m.g, m.d); BH_release(ptr2BH(ret)); rb.free(ptr2BH(ret)); } return ret; } void print_map() { std::ostringstream os; os << "S2P map:\n"; for (seqno2ptr_t::iterator i = s2p.begin(); i != s2p.end(); ++i) { log_info << "\tseqno: " << s2p.index(i) << ", msg: " << reinterpret_cast(*i) << "\n"; } log_info << os.str(); } }; seqno_t seqno_min, seqno_max; size_t const rb_5size(msg_size*5); { rb_ctx ctx(rb_5size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(ctx.s2p.empty()); void* m(ctx.add_msg(msgs[0])); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[0].g) == m); m = ctx.add_msg(msgs[1]); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[1].g) == m); m = ctx.add_msg(msgs[2]); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[2].g) == m); m = ctx.add_msg(msgs[3]); ck_assert(NULL != m); ck_assert(msgs[3].g <= 0); ck_assert(ctx.s2p.find(msgs[3].g) == ctx.s2p.end()); seqno_min = ctx.s2p.index_front(); seqno_max = ctx.s2p.index_back(); } /* What we have now is |111222444***|----| */ /* Reopening of the file should: * 1) discard messages 1, 2 since there is a hole at 3. 
Only 4 should remain * 2) trim the trailing unordered message */ { rb_ctx ctx(rb_5size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 1); ck_assert(ctx.s2p.index_front() != seqno_min); ck_assert(ctx.s2p.index_front() == seqno_max); void* m(ctx.add_msg(msgs[4])); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[4].g) == m); m = ctx.add_msg(msgs[5]); ck_assert(NULL != m); ck_assert(msgs[5].g <= 0); ck_assert(ctx.s2p.find(msgs[5].g) == ctx.s2p.end()); m = ctx.add_msg(msgs[6]); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[6].g) <= m); // here we should have rollover ck_assert(ptr2BH(m) == BH_cast(ctx.rb.start())); seqno_min = ctx.s2p.index_front(); seqno_max = ctx.s2p.index_back(); } /* What we have now is |555|---|444333***| */ /* Reopening of the file should: * 1) discard unordered message at the end * 2) continuous seqno interval should be now 3,4,5 */ { rb_ctx ctx0(rb_5size); ck_assert_msg(ctx0.rb.size() == ctx0.size, "Expected %zd, got %zd", ctx0.size, ctx0.rb.size()); if (ctx0.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx0.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx0.s2p.empty()); ck_assert(ctx0.s2p.size() == 3); ck_assert(ctx0.s2p.index_front() == seqno_min); ck_assert(ctx0.s2p.index_back() == seqno_max); /* now try to open unclosed file. 
Results should be the same */ rb_ctx ctx(rb_5size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert_msg(ctx.s2p.size() == 3, "Expected seqno2ptr size 3, got %zd", ctx.s2p.size()); ck_assert(ctx.s2p.index_front() == seqno_min); ck_assert(ctx.s2p.index_back() == seqno_max); seqno_min = ctx.s2p.index_front(); seqno_max = ctx.s2p.index_back(); } size_t const rb_3size(msg_size*3); /* now try to truncate the buffer. Only seqno 4,5 should remain */ /* |555---444| */ { rb_ctx ctx(rb_3size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 2); ck_assert(ctx.s2p.index_begin() != seqno_min); ck_assert(ctx.s2p.index_back() == seqno_max); void* m(ctx.add_msg(msgs[8])); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[8].g) == m); m = ctx.add_msg(msgs[9]); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[9].g) == m); m = ctx.add_msg(msgs[7]); ck_assert(NULL != m); ck_assert(msgs[7].g <= 0); ck_assert(ctx.s2p.find(msgs[7].g) == ctx.s2p.end()); // here we should have rollover ck_assert(ptr2BH(m) == BH_cast(ctx.rb.start())); seqno_min = ctx.s2p.index_front(); seqno_max = ctx.s2p.index_back(); } /* what we should have now is |***---777| - only one segment, at the end */ { /* first open this with known offset */ rb_ctx ctx0(rb_3size); ck_assert_msg(ctx0.rb.size() == ctx0.size, "Expected %zd, got %zd", ctx0.size, ctx0.rb.size()); if (ctx0.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx0.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx0.s2p.empty()); ck_assert(ctx0.s2p.size() == 
1); ck_assert(ctx0.s2p.index_front() == seqno_max); ck_assert(ctx0.s2p.index_back() == seqno_max); /* now try to open unclosed file. Results should be the same */ rb_ctx ctx(rb_3size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 1); ck_assert(ctx.s2p.index_front() == seqno_max); ck_assert(ctx.s2p.index_back() == seqno_max); ck_assert(seqno_max >= 1); ck_assert(seqno_min == seqno_max); } ::unlink(RB_NAME.c_str()); /* test for single segment in the middle */ ptrdiff_t third_buffer_offset(0); { rb_ctx ctx(rb_3size, false); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(ctx.s2p.empty()); void* m(ctx.add_msg(msgs[3])); ck_assert(NULL != m); ck_assert(ctx.s2p.find(msgs[3].g) == ctx.s2p.end()); m = ctx.add_msg(msgs[4]); ck_assert(NULL != m); ck_assert(*ctx.s2p.find(msgs[4].g) == m); m = ctx.add_msg(msgs[5]); ck_assert(NULL != m); ck_assert(ctx.s2p.find(msgs[5].g) == ctx.s2p.end()); third_buffer_offset = ctx.rb.offset(m); ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 1); seqno_min = ctx.s2p.index_front(); seqno_max = ctx.s2p.index_back(); ck_assert(seqno_min == seqno_max); } /* now the situation should be |***444***| - only one segment, in the middle, * reopen the file with a known position */ { rb_ctx ctx(rb_3size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 1); ck_assert(seqno_min == 
ctx.s2p.index_begin()); ck_assert(seqno_max == ctx.s2p.index_back()); ck_assert(seqno_min == seqno_max); } /* now the situation should be |---444---| - only one segment,in the middle, * reopen the file a second time - to trigger a rollover bug */ { rb_ctx ctx(rb_3size); ck_assert_msg(ctx.rb.size() == ctx.size, "Expected %zd, got %zd", ctx.size, ctx.rb.size()); if (ctx.gid != GID) { std::ostringstream os; os << "Expected GID: " << GID << ", got: " << ctx.gid; ck_abort_msg("%s", os.str().c_str()); } ck_assert(!ctx.s2p.empty()); ck_assert(ctx.s2p.size() == 1); ck_assert(seqno_min == ctx.s2p.index_front()); ck_assert(seqno_max == ctx.s2p.index_back()); ck_assert(seqno_min == seqno_max); // must be allocated right after the recovered buffer void* m(ctx.add_msg(msgs[3])); ck_assert(NULL != m); ck_assert_msg(third_buffer_offset == ctx.rb.offset(m), "expected %zd, got %zd", third_buffer_offset, ctx.rb.offset(m)); } ::unlink(RB_NAME.c_str()); } END_TEST Suite* gcache_rb_suite() { Suite* ts = suite_create("gcache::RbStore"); TCase* tc = tcase_create("test"); tcase_set_timeout(tc, 60); tcase_add_test(tc, test1); suite_add_tcase(ts, tc); tc = tcase_create("recovery"); tcase_set_timeout(tc, 60); tcase_add_test(tc, recovery); suite_add_tcase(ts, tc); return ts; } galera-4-26.4.25/gcache/tests/gcache_page_test.hpp000644 000164 177776 00000000367 15107057155 023054 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010 Codership Oy * * $Id$ */ #ifndef __gcache_page_test_hpp__ #define __gcache_page_test_hpp__ extern "C" { #include } extern Suite* gcache_page_suite(); #endif // __gcache_page_test_hpp__ galera-4-26.4.25/gcache/src/000755 000164 177776 00000000000 15107057160 016511 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcache/src/test.cpp000644 000164 177776 00000003350 15107057155 020201 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy * */ #include "GCache.hpp" #include #include using namespace gcache; #define TEST_CACHE "test.cache" 
int main (int argc, char* argv[]) { int ret = 0; std::string fname = "test.cache"; gu_conf_self_tstamp_on (); gu_conf_debug_on (); log_info << "Start"; log_debug << "DEBUG output enabled"; if (argc > 1) fname.assign(argv[1]); // take supplied file name if any try { gu::Config conf; GCache::register_params(conf); conf.parse("gcache.name = " TEST_CACHE "; gcache.size = 16K"); GCache* cache = new GCache (NULL, conf, ""); log_info << ""; log_info << "...do something..."; log_info << ""; delete cache; ::unlink(TEST_CACHE); log_info << "Exit: " << ret; } catch (gu::UUIDScanException& u) { gu_error("UUIDScanException: %d.", u.get_errno()); abort(); } catch (gu::NotFound& nf) { gu_error("NotFound exception."); abort(); } catch (gu::NotSet& ns) { gu_error("NotSet exception."); abort(); } catch (gu::Exception& e) { gu_error("Exception caught: %d : %s", e.get_errno(), e.what()); abort(); } catch (std::exception& e) { gu_error("Exception caught: %s\n", e.what()); abort(); } catch (...) { gu_error("GCS test caught unknown exception."); abort(); } try { throw gu::Exception ("My test exception", EINVAL); } catch (gu::Exception& e) { log_info << "Exception caught: " << e.what() << ", errno: " << e.get_errno(); } return ret; } galera-4-26.4.25/gcache/src/gcache_page_store.hpp000644 000164 177776 00000007473 15107057155 022663 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2025 Codership Oy */ /*! 
@file page store class */ #ifndef _gcache_page_store_hpp_ #define _gcache_page_store_hpp_ #include "gcache_memops.hpp" #include "gcache_page.hpp" #include "gcache_seqno.hpp" #include #include namespace gcache { class PageStore : public MemOps { public: PageStore (SeqnoMap& seqno_map, const std::string& dir_name, size_t keep_size, size_t page_size, int dbg, bool keep_page); ~PageStore (); static PageStore* page_store(const Page* p) { return static_cast(p->parent()); } void* malloc (size_type size); void* realloc (void* ptr, size_type size); void free (BufferHeader* bh) { assert(BH_is_released(bh)); free_page_ptr(static_cast(BH_ctx(bh)), bh); } void repossess(BufferHeader* bh) { assert(BH_is_released(bh)); static_cast(BH_ctx(bh))->repossess(bh); } void discard (BufferHeader* bh) { assert(BH_is_released(bh)); static_cast(BH_ctx(bh))->discard(bh); } void reset(); void seqno_assign(BufferHeader* bh, seqno_t s) { static_cast(BH_ctx(bh))->seqno_assign(s); } void seqno_lock(seqno_t s) { assert(s < seqno_locked_); seqno_locked_ = s; } void seqno_unlock() { seqno_locked_ = SEQNO_MAX; cleanup(); } void set_page_size (size_t size) { page_size_ = size; } void set_keep_size (size_t size) { keep_size_ = size; } void set_debug(int dbg); /* for unit tests */ size_t count() const { return count_; } size_t total_pages() const { return pages_.size(); } size_t total_size() const { return total_size_; } size_t keep_size() const { return keep_size_; } size_t keep_page() const { return keep_page_; } size_t page_size() const { return page_size_; } #ifdef GCACHE_PAGE_STORE_UNIT_TEST void wait_page_discard() const; #endif /* GCACHE_PAGE_STORE_UNIT_TEST */ private: static int const DEBUG = 4; // debug flag SeqnoMap& seqno_map_; std::string const base_name_; /* /.../.../gcache.page. 
*/ seqno_t seqno_locked_; size_t keep_size_; /* how much pages to keep after freeing*/ size_t page_size_; /* min size of the individual page */ bool const keep_page_; /* whether to keep the last page */ size_t count_; typedef std::deque PageQueue; PageQueue pages_; Page* current_; size_t total_size_; pthread_attr_t delete_page_attr_; int debug_; mutable pthread_t delete_thr_; void new_page (size_type size); // returns true if a page could be deleted bool delete_page (); // cleans up extra pages. void cleanup (); #ifndef GCACHE_PAGE_STORE_UNIT_TEST void wait_page_discard() const; #endif /* !GCACHE_PAGE_STORE_UNIT_TEST */ void* malloc_new (size_type size); void free_page_ptr (Page* page, BufferHeader* bh) { page->free(bh); if (0 == page->used() && pages_.front() == page) cleanup(); } PageStore(const gcache::PageStore&); PageStore& operator=(const gcache::PageStore&); }; } #endif /* _gcache_page_store_hpp_ */ galera-4-26.4.25/gcache/src/CMakeLists.txt000644 000164 177776 00000001150 15107057155 021252 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # # # GCache library # add_library(gcache STATIC GCache_seqno.cpp gcache_params.cpp gcache_page.cpp gcache_page_store.cpp gcache_rb_store.cpp gcache_mem_store.cpp GCache_memops.cpp GCache.cpp ) # TODO: Fix. target_compile_options(gcache PRIVATE -Wno-conversion -Wno-unused-parameter ) target_link_libraries(gcache galerautilsxx) # # Gcache test # add_executable(gcache_test test.cpp) target_link_libraries(gcache_test gcache pthread rt) target_compile_options(gcache_test PRIVATE -Wno-conversion -Wno-unused-parameter) galera-4-26.4.25/gcache/src/gcache_memops.hpp000644 000164 177776 00000004162 15107057155 022023 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ /*! 
@file memory operations interface */ #ifndef _gcache_memops_hpp_ #define _gcache_memops_hpp_ #include "gcache_seqno.hpp" #include #include #include // GU_MIN_ALIGNMENT #include namespace gcache { struct BufferHeader; class MemOps { public: /* although size value passed to GCache should be representable by * a signed integer type, internally the buffer allocated will also * incur header overhead, so it has to be represented by unsigned * int. * However the difference between two internal sizes should never * exceed signed representation. */ typedef int ssize_type; // size passed to GCache typedef unsigned int size_type; // internal size representation typedef ssize_type diff_type; // difference between two size_types MemOps() {} virtual ~MemOps() noexcept(false) {} virtual void* malloc (size_type size) = 0; virtual void* realloc (void* ptr, size_type size) = 0; virtual void free (BufferHeader* bh) = 0; virtual void repossess(BufferHeader* bh) = 0; /* "unfree" */ virtual void discard (BufferHeader* bh) = 0; virtual void reset () = 0; virtual void seqno_lock(seqno_t seqno_g) = 0; virtual void seqno_unlock() = 0; /* GCache 3.x is not supposed to be portable between platforms */ static size_type const ALIGNMENT = GU_MIN_ALIGNMENT; static inline size_type align_size(size_type s) { return align(s); } static inline uint8_t* align_ptr(uint8_t* p) { return reinterpret_cast(align(uintptr_t(p))); } private: template static inline T align(T s) { return GU_ALIGN(s, ALIGNMENT); } }; } #endif /* _gcache_memops_hpp_ */ galera-4-26.4.25/gcache/src/gcache_seqno.hpp000644 000164 177776 00000002017 15107057155 021645 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2016-2025 Codership Oy */ #ifndef __GCACHE_SEQNO__ #define __GCACHE_SEQNO__ #include #include namespace gcache { typedef int64_t seqno_t; static seqno_t const SEQNO_NONE = 0; static seqno_t const SEQNO_ILL = -1; static seqno_t const SEQNO_MAX #if __GNUC__ <= 5 /* workaround for GCC 5 (and below) bug */ 
__attribute__((unused)) #endif = std::numeric_limits::max(); /* Protected interface to seqno map */ class SeqnoMap { public: /* Discards all seqnos upto and including seqno */ virtual void seqno_discard(const seqno_t& seqno) = 0; /* Set low limit on available senqos to > seqno * (global lock should be held while it is called) */ virtual void set_low_limit(const seqno_t& seqno) = 0; virtual ~SeqnoMap() noexcept(false) {} }; /* SeqnoMap */ } /* namespace gcache */ #endif /* __GCACHE_SEQNO__ */ galera-4-26.4.25/gcache/src/gcache_page.hpp000644 000164 177776 00000011640 15107057155 021436 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2025 Codership Oy */ /*! @file page file class */ #ifndef _gcache_page_hpp_ #define _gcache_page_hpp_ #include "gcache_seqno.hpp" #include "gcache_memops.hpp" #include "gcache_bh.hpp" #include "gu_fdesc.hpp" #include "gu_mmap.hpp" #include "gu_logger.hpp" #include #include namespace gcache { class Page : public MemOps { public: Page (void* ps, const std::string& name, size_t size, int dbg); ~Page () {} void* malloc (size_type size); void* realloc (void* ptr, size_type size); void free (BufferHeader* bh) { assert(bh >= mmap_.ptr); assert(static_cast(bh) <= (static_cast(mmap_.ptr) + mmap_.size - sizeof(BufferHeader))); assert(bh->size > 0); assert(bh->store == BUFFER_IN_PAGE); assert(bh->ctx == reinterpret_cast(this)); assert(!closed_); assert(used_ > 0); used_--; #ifndef NDEBUG if (debug_) { log_info << name() << " freed " << bh << ", used: " << used_ << ", mapped: " << mapped_; } #endif } void repossess(BufferHeader* bh) { assert(bh >= mmap_.ptr); assert(reinterpret_cast(bh) + bh->size <= next_); assert(bh->size > 0); assert(bh->seqno_g != SEQNO_NONE); assert(bh->store == BUFFER_IN_PAGE); assert(bh->ctx == reinterpret_cast(this)); assert(BH_is_released(bh)); // will be marked unreleased by caller assert(!closed_); // minimum available seqno must be adjusted // before closing the page used_++; #ifndef NDEBUG if (debug_) { 
log_info << name() << " repossessed " << bh << ", used: " << used_ << ", mapped: " << mapped_; } #endif } void discard (BufferHeader* bh) { assert(bh >= mmap_.ptr); assert(reinterpret_cast(bh) + bh->size <= next_); assert(bh->size > 0); assert(bh->seqno_g != SEQNO_NONE); assert(bh->store == BUFFER_IN_PAGE); assert(bh->ctx == reinterpret_cast(this)); assert(BH_is_released(bh)); assert(mapped_ > 0 || bh->seqno_g == SEQNO_ILL); mapped_ -= (bh->seqno_g != SEQNO_ILL); #ifndef NDEBUG if (bh->seqno_g != SEQNO_ILL && 0 == mapped_) assert(seqno_max_ == bh->seqno_g); if (debug_) { log_info << name() << " discarded " << bh << ", used: " << used_ << ", mapped: " << mapped_; } #endif } size_t used() const { return used_; } size_t size() const { return fd_.size(); } /* size on storage */ const std::string& name() const { return fd_.name(); } void reset (); void seqno_lock(seqno_t) {} void seqno_unlock() {} void seqno_assign(seqno_t const seqno) { assert(seqno > 0); assert(used_ > 0); // cannot assign seqno to unused buffer assert(!closed_); // cannot be closed while used seqno_max_ = std::max(seqno_max_, seqno); mapped_++; #ifndef NDEBUG if (debug_) { log_info << name() << " seqno_assign(" << seqno << ") seqno_max: " << seqno_max_ << ", used: " << used_ << ", mapped: " << mapped_; } #endif } seqno_t seqno_max() const { return seqno_max_; } void close() { assert(0 == used_); closed_ = true; } /* Drop filesystem cache on the file */ void drop_fs_cache() const; void* parent() const { return ps_; } void print(std::ostream& os) const; void set_debug(int const dbg) { debug_ = dbg; } private: gu::FileDescriptor fd_; gu::MMap mmap_; seqno_t seqno_max_; // highest seqno assigned to buffer void* const ps_; uint8_t* next_; size_t space_; size_t used_; // allocated - freed buffers size_t mapped_; // buffers mapped in seqno2ptr map int debug_; bool closed_; // page not available any more Page(const gcache::Page&); Page& operator=(const gcache::Page&); }; static inline std::ostream& operator 
<<(std::ostream& os, const gcache::Page& p) { p.print(os); return os; } } #endif /* _gcache_page_hpp_ */ galera-4-26.4.25/gcache/src/gcache_page.cpp000644 000164 177776 00000011242 15107057155 021427 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2025 Codership Oy */ /*! @file page file class implementation */ #include "gcache_page.hpp" #include "gcache_limits.hpp" #include #include // for posix_fadvise() #if !defined(_XOPEN_SOURCE) #define _XOPEN_SOURCE 600 #endif #include void gcache::Page::reset () { if (gu_unlikely (used_ > 0)) { log_fatal << "Attempt to reset a page '" << name() << "' used by " << used_ << " buffers. Aborting."; abort(); } space_ = mmap_.size; next_ = static_cast(mmap_.ptr); } void gcache::Page::drop_fs_cache() const { mmap_.dont_need(); #if !defined(__APPLE__) int const err (posix_fadvise (fd_.get(), 0, fd_.size(), POSIX_FADV_DONTNEED)); if (err != 0) { log_warn << "Failed to set POSIX_FADV_DONTNEED on " << fd_.name() << ": " << err << " (" << strerror(err) << ")"; } #endif } gcache::Page::Page (void* const ps, const std::string& name, size_t size, int dbg) : fd_ (name, size, true, false), mmap_ (fd_), seqno_max_(SEQNO_NONE), ps_ (ps), next_ (static_cast(mmap_.ptr)), space_(mmap_.size), used_(0), mapped_(0), debug_(dbg), closed_(false) { log_info << "Created page " << name << " of size " << space_ << " bytes"; BH_clear (reinterpret_cast(next_)); } void* gcache::Page::malloc (size_type size) { Limits::assert_size(size); if (size <= space_ && !closed_) { BufferHeader* bh(BH_cast(next_)); bh->size = size; bh->seqno_g = SEQNO_NONE; bh->ctx = reinterpret_cast(this); bh->flags = 0; bh->store = BUFFER_IN_PAGE; assert(space_ >= size); space_ -= size; next_ += size; used_++; #ifndef NDEBUG if (space_ >= sizeof(BufferHeader)) { BH_clear (BH_cast(next_)); assert (reinterpret_cast(bh + 1) < next_); } assert (next_ <= static_cast(mmap_.ptr) + mmap_.size); if (debug_) { log_info << name() << " allocd " << bh << ", used: " << used_ << ", 
mapped: " << mapped_; } #endif return (bh + 1); } else { log_debug << "Failed to allocate " << size << " bytes, space left: " << space_ << " bytes, total allocated: " << next_ - static_cast(mmap_.ptr); return 0; } } void* gcache::Page::realloc (void* ptr, size_type size) { Limits::assert_size(size); if (closed_) return nullptr; BufferHeader* bh(ptr2BH(ptr)); if (bh == BH_cast(next_ - bh->size)) // last buffer, can shrink and expand { diff_type const diff_size (size - bh->size); if (gu_likely (diff_size < 0 || size_t(diff_size) < space_)) { bh->size += diff_size; space_ -= diff_size; next_ += diff_size; BH_clear (BH_cast(next_)); return ptr; } else return 0; // not enough space in this page } else { if (gu_likely(size > 0 && uint32_t(size) > bh->size)) { void* const ret (malloc (size)); if (ret) { memcpy (ret, ptr, bh->size - sizeof(BufferHeader)); assert(used_ > 0); used_--; } return ret; } else { // do nothing, we can't shrink the buffer, it is locked return ptr; } } } void gcache::Page::print(std::ostream& os) const { os << "name: " << name() << ", size: " << size() << ", used: " << used() << ", mapped: " << mapped_ << ", seqno_max: " << seqno_max(); if (used_ > 0 && debug_ > 0) { bool was_released(true); const uint8_t* const start(static_cast(mmap_.ptr)); const uint8_t* p(start); assert(p != next_); while (p != next_) { ptrdiff_t const offset(p - start); const BufferHeader* const bh(BH_const_cast(p)); p += bh->size; if (!BH_is_released(bh)) { os << "\noff: " << offset << ", " << bh; was_released = false; } else { if (!was_released && p != next_) { os << "\n..."; /* indicate gap */ } was_released = true; } } } } galera-4-26.4.25/gcache/src/gcache_params.cpp000644 000164 177776 00000013305 15107057155 022000 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2018 Codership Oy */ #include "GCache.hpp" static const std::string GCACHE_PARAMS_DIR ("gcache.dir"); static const std::string GCACHE_DEFAULT_DIR (""); static const std::string GCACHE_PARAMS_RB_NAME 
("gcache.name"); static const std::string GCACHE_DEFAULT_RB_NAME ("galera.cache"); static const std::string GCACHE_PARAMS_MEM_SIZE ("gcache.mem_size"); static const std::string GCACHE_DEFAULT_MEM_SIZE ("0"); static const std::string GCACHE_PARAMS_RB_SIZE ("gcache.size"); static const std::string GCACHE_DEFAULT_RB_SIZE ("128M"); static const std::string GCACHE_PARAMS_PAGE_SIZE ("gcache.page_size"); static const std::string GCACHE_DEFAULT_PAGE_SIZE (GCACHE_DEFAULT_RB_SIZE); static const std::string GCACHE_PARAMS_KEEP_PAGES_SIZE("gcache.keep_pages_size"); static const std::string GCACHE_DEFAULT_KEEP_PAGES_SIZE("0"); #ifndef NDEBUG static const std::string GCACHE_PARAMS_DEBUG ("gcache.debug"); static const std::string GCACHE_DEFAULT_DEBUG ("0"); #endif static const std::string GCACHE_PARAMS_RECOVER ("gcache.recover"); static const std::string GCACHE_DEFAULT_RECOVER ("yes"); const std::string& gcache::GCache::PARAMS_DIR (GCACHE_PARAMS_DIR); void gcache::GCache::Params::register_params(gu::Config& cfg) { cfg.add(GCACHE_PARAMS_DIR, GCACHE_DEFAULT_DIR, gu::Config::Flag::read_only); cfg.add(GCACHE_PARAMS_RB_NAME, GCACHE_DEFAULT_RB_NAME, gu::Config::Flag::read_only); cfg.add(GCACHE_PARAMS_MEM_SIZE, GCACHE_DEFAULT_MEM_SIZE, gu::Config::Flag::type_integer); cfg.add(GCACHE_PARAMS_RB_SIZE, GCACHE_DEFAULT_RB_SIZE, gu::Config::Flag::read_only | gu::Config::Flag::type_integer); cfg.add(GCACHE_PARAMS_PAGE_SIZE, GCACHE_DEFAULT_PAGE_SIZE, gu::Config::Flag::type_integer); cfg.add(GCACHE_PARAMS_KEEP_PAGES_SIZE, GCACHE_DEFAULT_KEEP_PAGES_SIZE, gu::Config::Flag::type_integer); #ifndef NDEBUG cfg.add(GCACHE_PARAMS_DEBUG, GCACHE_DEFAULT_DEBUG); #endif cfg.add(GCACHE_PARAMS_RECOVER, GCACHE_DEFAULT_RECOVER, gu::Config::Flag::read_only | gu::Config::Flag::type_bool); } static const std::string name_value (gu::Config& cfg, const std::string& data_dir) { std::string dir(cfg.get(GCACHE_PARAMS_DIR)); /* fallback to data_dir if gcache dir is not set */ if (GCACHE_DEFAULT_DIR == dir && 
!data_dir.empty()) { dir = data_dir; cfg.set (GCACHE_PARAMS_DIR, dir); } std::string rb_name(cfg.get (GCACHE_PARAMS_RB_NAME)); /* prepend directory name to RB file name if the former is not empty and * the latter is not an absolute path */ if ('/' != rb_name[0] && !dir.empty()) { rb_name = dir + '/' + rb_name; } return rb_name; } gcache::GCache::Params::Params (gu::Config& cfg, const std::string& data_dir) : rb_name_ (name_value (cfg, data_dir)), dir_name_ (cfg.get(GCACHE_PARAMS_DIR)), mem_size_ (cfg.get(GCACHE_PARAMS_MEM_SIZE)), rb_size_ (cfg.get(GCACHE_PARAMS_RB_SIZE)), page_size_(cfg.get(GCACHE_PARAMS_PAGE_SIZE)), keep_pages_size_(cfg.get(GCACHE_PARAMS_KEEP_PAGES_SIZE)), #ifndef NDEBUG debug_ (cfg.get(GCACHE_PARAMS_DEBUG)), #else debug_ (0), #endif recover_ (cfg.get(GCACHE_PARAMS_RECOVER)) {} void gcache::GCache::param_set (const std::string& key, const std::string& val) { if (key == GCACHE_PARAMS_RB_NAME) { gu_throw_error(EPERM) << "Can't change ring buffer name in runtime."; } else if (key == GCACHE_PARAMS_DIR) { gu_throw_error(EPERM) << "Can't change data dir in runtime."; } else if (key == GCACHE_PARAMS_MEM_SIZE) { size_t tmp_size = gu::Config::from_config(val); gu::Lock lock(mtx); /* locking here serves two purposes: ensures atomic setting of config * and params.ram_size and syncs with malloc() method */ config.set(key, tmp_size); params.mem_size(tmp_size); mem.set_max_size(params.mem_size()); } else if (key == GCACHE_PARAMS_RB_SIZE) { gu_throw_error(EPERM) << "Can't change ring buffer size in runtime."; } else if (key == GCACHE_PARAMS_PAGE_SIZE) { size_t tmp_size = gu::Config::from_config(val); gu::Lock lock(mtx); /* locking here serves two purposes: ensures atomic setting of config * and params.ram_size and syncs with malloc() method */ config.set(key, tmp_size); params.page_size(tmp_size); ps.set_page_size(params.page_size()); } else if (key == GCACHE_PARAMS_KEEP_PAGES_SIZE) { size_t tmp_size = gu::Config::from_config(val); gu::Lock lock(mtx); /* locking 
here serves two purposes: ensures atomic setting of config * and params.ram_size and syncs with malloc() method */ config.set(key, tmp_size); params.keep_pages_size(tmp_size); ps.set_keep_size(params.keep_pages_size()); } else if (key == GCACHE_PARAMS_RECOVER) { gu_throw_error(EINVAL) << "'" << key << "' has a meaning only on startup."; } #ifndef NDEBUG else if (key == GCACHE_PARAMS_DEBUG) { int d = gu::Config::from_config(val); gu::Lock lock(mtx); /* locking here serves two purposes: ensures atomic setting of config * and params.ram_size and syncs with malloc() method */ config.set(key, d); params.debug(d); mem.set_debug(params.debug()); rb.set_debug(params.debug()); ps.set_debug(params.debug()); } #endif else { throw gu::NotFound(); } } galera-4-26.4.25/gcache/src/gcache_rb_store.cpp000644 000164 177776 00000125677 15107057155 022354 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ #include "gcache_rb_store.hpp" #include "gcache_page_store.hpp" #include "gcache_mem_store.hpp" #include "gcache_limits.hpp" #include #include #include #include #include #include #include // std::cerr namespace gcache { static inline size_t check_size (size_t s) { return s + RingBuffer::pad_size() + sizeof(BufferHeader); } void RingBuffer::reset() { write_preamble(false); for (seqno2ptr_iter_t i = seqno2ptr_.begin(); i != seqno2ptr_.end();) { if (ptr2BH(*i)->ctx == BH_ctx_t(this)) { i = seqno2ptr_.erase(i); } else { ++i; } } first_ = start_; next_ = start_; BH_clear (BH_cast(next_)); size_free_ = size_cache_; size_used_ = 0; size_trail_= 0; // mallocs_ = 0; // reallocs_ = 0; } void RingBuffer::constructor_common() {} RingBuffer::RingBuffer (ProgressCallback* pcb, const std::string& name, size_t size, seqno2ptr_t& seqno2ptr, gu::UUID& gid, int const dbg, bool const recover) : pcb_ (pcb), fd_ (name, check_size(size)), mmap_ (fd_), preamble_ (static_cast(mmap_.ptr)), header_ (reinterpret_cast(preamble_ + PREAMBLE_LEN)), start_ (reinterpret_cast(header_ + 
HEADER_LEN)), end_ (reinterpret_cast(preamble_ + mmap_.size)), first_ (start_), next_ (first_), seqno2ptr_ (seqno2ptr), gid_ (gid), seqno_locked_(SEQNO_MAX), size_cache_(end_ - start_ - sizeof(BufferHeader)), size_free_ (size_cache_), size_used_ (0), size_trail_(0), // mallocs_ (0), // reallocs_ (0), debug_ (dbg & DEBUG), open_ (true) { assert((uintptr_t(start_) % MemOps::ALIGNMENT) == 0); constructor_common (); open_preamble(recover); BH_clear (BH_cast(next_)); } RingBuffer::~RingBuffer () { close_preamble(); open_ = false; mmap_.sync(); } static inline void empty_buffer(BufferHeader* const bh) //mark buffer as empty { bh->seqno_g = gcache::SEQNO_ILL; } bool buffer_is_empty(const BufferHeader* const bh) { return (SEQNO_ILL == bh->seqno_g); } /* discard all seqnos preceeding and including seqno */ bool RingBuffer::discard_seqnos(seqno2ptr_t::iterator const i_begin, seqno2ptr_t::iterator const i_end) { for (seqno2ptr_t::iterator i(i_begin); i != i_end;) { seqno2ptr_t::iterator j(i); /* advance i to next set element skipping holes */ do { ++i; } while ( i != i_end && !*i); BufferHeader* const bh(ptr2BH(*j)); if (gu_likely (BH_is_released(bh) && bh->seqno_g < seqno_locked_)) { seqno2ptr_.erase (j); switch (bh->store) { case BUFFER_IN_RB: discard(bh); break; case BUFFER_IN_MEM: { MemStore* const ms(static_cast(BH_ctx(bh))); ms->discard(bh); break; } case BUFFER_IN_PAGE: { Page* const page (static_cast(BH_ctx(bh))); PageStore* const ps (PageStore::page_store(page)); ps->discard(bh); break; } default: log_fatal << "Corrupt buffer header: " << bh; abort(); } } else { return false; } } return true; } // returns pointer to buffer data area or 0 if no space found BufferHeader* RingBuffer::get_new_buffer (size_type const size) { assert((size % MemOps::ALIGNMENT) == 0); assert_size_free(); BH_assert_clear(BH_cast(next_)); uint8_t* ret(next_); size_type const size_next(size + sizeof(BufferHeader)); Limits::assert_size(size_next); if (ret >= first_) { assert (0 == size_trail_); 
// try to find space at the end size_t const end_size(end_ - ret); if (end_size >= size_next) { assert(size_free_ >= size); goto found_space; } else { // no space at the end, go from the start size_trail_ = end_size; ret = start_; } } assert (ret <= first_); if (size_t(first_ - ret) >= size_next) { assert(size_free_ >= size); } while (size_t(first_ - ret) < size_next) { // try to discard first buffer to get more space BufferHeader* bh = BH_cast(first_); if (!BH_is_released(bh) /* true also when first_ == next_ */ || (bh->seqno_g > 0 && !discard_seqno (bh->seqno_g))) { // can't free any more space, so no buffer, next_ is unchanged // and revert size_trail_ if it was set above if (next_ >= first_) size_trail_ = 0; assert_sizes(); return 0; } assert (first_ != next_); /* buffer is either discarded already, or it must have seqno */ assert (SEQNO_ILL == bh->seqno_g); first_ += bh->size; assert_size_free(); if (gu_unlikely(0 == (BH_cast(first_))->size)) { // empty header: check if we fit at the end and roll over if not assert(first_ >= next_); assert(first_ >= ret); first_ = start_; assert_size_free(); if (size_t(end_ - ret) >= size_next) { assert(size_free_ >= size); size_trail_ = 0; goto found_space; } else { size_trail_ = end_ - ret; ret = start_; } } assert(ret <= first_); } assert (ret <= first_); #ifndef NDEBUG if (size_t(first_ - ret) < size_next) { log_fatal << "Assertion ((first - ret) >= size_next) failed: " << std::endl << "first offt = " << (first_ - start_) << std::endl << "next offt = " << (next_ - start_) << std::endl << "end offt = " << (end_ - start_) << std::endl << "ret offt = " << (ret - start_) << std::endl << "size_next = " << size_next << std::endl; abort(); } #endif found_space: assert((uintptr_t(ret) % MemOps::ALIGNMENT) == 0); size_used_ += size; assert (size_used_ <= size_cache_); assert (size_free_ >= size); size_free_ -= size; BufferHeader* const bh(BH_cast(ret)); bh->size = size; bh->seqno_g = SEQNO_NONE; bh->flags = 0; bh->store = 
BUFFER_IN_RB; bh->ctx = reinterpret_cast(this); next_ = ret + size; assert((uintptr_t(next_) % MemOps::ALIGNMENT) == 0); assert (next_ + sizeof(BufferHeader) <= end_); BH_clear (BH_cast(next_)); assert_sizes(); return bh; } void* RingBuffer::malloc (size_type const size) { Limits::assert_size(size); void* ret(NULL); // We can reliably allocate continuous buffer which is 1/2 // of a total cache space. So compare to half the space if (size <= (size_cache_ / 2) && size <= (size_cache_ - size_used_)) { BufferHeader* const bh (get_new_buffer (size)); BH_assert_clear(BH_cast(next_)); // mallocs_++; if (gu_likely (0 != bh)) ret = bh + 1; } assert_sizes(); return ret; // "out of memory" } void RingBuffer::free (BufferHeader* const bh) { assert(BH_is_released(bh)); assert(size_used_ >= bh->size); size_used_ -= bh->size; if (SEQNO_NONE == bh->seqno_g) { empty_buffer(bh); discard (bh); } } void* RingBuffer::realloc (void* ptr, size_type const size) { Limits::assert_size(size); assert_sizes(); assert (NULL != ptr); assert (size > 0); // We can reliably allocate continuous buffer which is twice as small // as total cache area. 
So compare to half the space if (size > (size_cache_ / 2)) return 0; BufferHeader* const bh(ptr2BH(ptr)); // reallocs_++; // first check if we can grow this buffer by allocating // adjacent buffer { Limits::assert_size(bh->size); diff_type const adj_size(size - bh->size); if (adj_size <= 0) return ptr; uint8_t* const adj_ptr(reinterpret_cast(BH_next(bh))); if (adj_ptr == next_) { ssize_type const size_trail_saved(size_trail_); void* const adj_buf (get_new_buffer (adj_size)); BH_assert_clear(BH_cast(next_)); if (adj_ptr == adj_buf) { bh->size = next_ - static_cast(ptr) + sizeof(BufferHeader); return ptr; } else // adjacent buffer allocation failed, return it back { next_ = adj_ptr; BH_clear (BH_cast(next_)); size_used_ -= adj_size; size_free_ += adj_size; if (next_ < first_) size_trail_ = size_trail_saved; } } } BH_assert_clear(BH_cast(next_)); assert_sizes(); // find non-adjacent buffer void* ptr_new = malloc (size); if (ptr_new != 0) { memcpy (ptr_new, ptr, bh->size - sizeof(BufferHeader)); free (bh); } BH_assert_clear(BH_cast(next_)); assert_sizes(); return ptr_new; } void RingBuffer::estimate_space() { /* Estimate how much space remains */ if (first_ < next_) { /* start_ first_ next_ end_ * | |###########| | */ size_used_ = next_ - first_; size_free_ = size_cache_ - size_used_; size_trail_ = 0; } else { /* start_ next_ first_ end_ * |#######| |#####| | * ^size_trail_ */ assert(size_trail_ > 0); size_free_ = first_ - next_ + size_trail_ - sizeof(BufferHeader); size_used_ = size_cache_ - size_free_; } assert_sizes(); assert(size_free_ < size_cache_); } void RingBuffer::seqno_reset() { write_preamble(false); if (size_cache_ == size_free_) return; /* Invalidate seqnos for all ordered buffers (so that they can't be * recovered on restart. Also find the last seqno'd RB buffer. 
*/ BufferHeader* bh(0); for (seqno2ptr_t::iterator i(seqno2ptr_.begin()); i != seqno2ptr_.end(); ++i) { BufferHeader* const b(ptr2BH(*i)); if (BUFFER_IN_RB == b->store) { #ifndef NDEBUG if (!BH_is_released(b)) { log_fatal << "Buffer " << b << " is not released."; assert(0); } #endif b->seqno_g = SEQNO_NONE; bh = b; } } if (!bh) return; /* no seqno'd buffers in RB */ assert(bh->size > 0); assert(BH_is_released(bh)); /* Seek the first unreleased buffer. * This should be called in isolation, when all seqno'd buffers are * freed, and the only unreleased buffers should come only from new * configuration. There should be no seqno'd buffers after it. */ size_t const old(size_free_); assert (0 == size_trail_ || first_ > next_); first_ = reinterpret_cast(bh); while (BH_is_released(bh)) // next_ is never released - no endless loop { first_ = reinterpret_cast(BH_next(bh)); if (gu_unlikely (0 == bh->size && first_ != next_)) { // rollover assert (first_ > next_); first_ = start_; } bh = BH_cast(first_); } BH_assert_clear(BH_cast(next_)); if (first_ == next_) { log_info << "GCache DEBUG: RingBuffer::seqno_reset(): full reset"; /* empty RB, reset it completely */ reset(); return; } assert ((BH_cast(first_))->size > 0); assert (first_ != next_); assert ((BH_cast(first_))->seqno_g == SEQNO_NONE); assert (!BH_is_released(BH_cast(first_))); estimate_space(); log_info << "GCache DEBUG: RingBuffer::seqno_reset(): discarded " << (size_free_ - old) << " bytes"; /* There is a small but non-0 probability that some released buffers * are locked within yet unreleased aborted local actions. 
* Seek all the way to next_, invalidate seqnos and update size_free_ */ assert(first_ != next_); assert(bh == BH_cast(first_)); long total(1); long locked(0); bh = BH_next(bh); while (bh != BH_cast(next_)) { if (gu_likely (bh->size > 0)) { total++; if (bh->seqno_g != SEQNO_NONE) { // either released or already discarded buffer assert (BH_is_released(bh)); empty_buffer(bh); discard (bh); locked++; } else { assert(!BH_is_released(bh)); } bh = BH_next(bh); } else // rollover { assert (BH_cast(next_) < bh); bh = BH_cast(start_); } } log_info << "GCache DEBUG: RingBuffer::seqno_reset(): found " << locked << '/' << total << " locked buffers"; assert_sizes(); if (next_ > first_ && first_ > start_) BH_clear(BH_cast(start_)); /* this is needed to avoid rescanning from start_ on recovery */ } void RingBuffer::print (std::ostream& os) const { os << "this: " << static_cast(this) << "\nstart_ : " << BH_cast(start_) << "\nfirst_ : " << BH_cast(first_) << ", off: " << first_ - start_ << "\nnext_ : " << BH_cast(next_) << ", off: " << next_ - start_ << "\nsize : " << size_cache_ << "\nfree : " << size_free_ << "\nused : " << size_used_; } std::string const RingBuffer::PR_KEY_VERSION = "Version:"; std::string const RingBuffer::PR_KEY_GID = "GID:"; std::string const RingBuffer::PR_KEY_SEQNO_MAX = "seqno_max:"; std::string const RingBuffer::PR_KEY_SEQNO_MIN = "seqno_min:"; std::string const RingBuffer::PR_KEY_OFFSET = "offset:"; std::string const RingBuffer::PR_KEY_SYNCED = "synced:"; void RingBuffer::write_preamble(bool const synced) { uint8_t* const preamble(reinterpret_cast(preamble_)); std::ostringstream os; os << PR_KEY_VERSION << ' ' << VERSION << '\n'; os << PR_KEY_GID << ' ' << gid_ << '\n'; if (synced) { if (!seqno2ptr_.empty()) { os << PR_KEY_SEQNO_MIN << ' ' << seqno2ptr_.index_front() << '\n'; os << PR_KEY_SEQNO_MAX << ' ' << seqno2ptr_.index_back() << '\n'; os << PR_KEY_OFFSET << ' ' << first_ - preamble << '\n'; } else { os << PR_KEY_SEQNO_MIN << ' ' << SEQNO_ILL << 
'\n'; os << PR_KEY_SEQNO_MAX << ' ' << SEQNO_ILL << '\n'; } } os << PR_KEY_SYNCED << ' ' << synced << '\n'; os << '\n'; ::memset(preamble_, '\0', PREAMBLE_LEN); size_t copy_len(os.str().length()); if (copy_len >= PREAMBLE_LEN) copy_len = PREAMBLE_LEN - 1; ::memcpy(preamble_, os.str().c_str(), copy_len); mmap_.sync(preamble_, copy_len); } void RingBuffer::open_preamble(bool const do_recover) { int version(0); // used only for recovery on upgrade uint8_t* const preamble(reinterpret_cast(preamble_)); long long seqno_max(SEQNO_ILL); long long seqno_min(SEQNO_ILL); off_t offset(-1); bool synced(false); { std::istringstream iss(preamble_); if (iss.fail()) gu_throw_error(EINVAL) << "Failed to open preamble."; std::string line; while (getline(iss, line), iss.good()) { std::istringstream istr(line); std::string key; istr >> key; if ('#' == key[0]) { /* comment line */ } else if (PR_KEY_VERSION == key) istr >> version; else if (PR_KEY_GID == key) istr >> gid_; else if (PR_KEY_SEQNO_MAX == key) istr >> seqno_max; else if (PR_KEY_SEQNO_MIN == key) istr >> seqno_min; else if (PR_KEY_OFFSET == key) istr >> offset; else if (PR_KEY_SYNCED == key) istr >> synced; } } if (version < 0 || version > 16) { log_warn << "Bogus version in GCache ring buffer preamble: " << version << ". Assuming 0."; version = 0; } if (offset < -1 || (preamble + offset + sizeof(BufferHeader)) > end_ || (version >= 2 && offset >= 0 && (offset % MemOps::ALIGNMENT))) { log_warn << "Bogus offset in GCache ring buffer preamble: " << offset << ". 
Assuming unknown."; offset = -1; } log_info << "GCache DEBUG: opened preamble:" << "\nVersion: " << version << "\nUUID: " << gid_ << "\nSeqno: " << seqno_min << " - " << seqno_max << "\nOffset: " << offset << "\nSynced: " << synced; if (do_recover) { if (gid_ != gu::UUID()) { log_info << "Recovering GCache ring buffer: version: " < class recover_progress_callback : public gu::Progress::Callback { public: recover_progress_callback(gcache::ProgressCallback* pcb) : pcb_(pcb) {} ~recover_progress_callback() {} void operator()(T const total, T const done) { if (pcb_) (*pcb_)(total, done); } private: recover_progress_callback(const recover_progress_callback&); recover_progress_callback& operator=(recover_progress_callback); ProgressCallback* pcb_; }; seqno_t RingBuffer::scan(off_t const offset, int const scan_step) { int segment_scans(0); seqno_t seqno_max(SEQNO_ILL); uint8_t* ptr; BufferHeader* bh; size_t collision_count(0); seqno_t erase_up_to(-1); uint8_t* segment_start(start_); uint8_t* segment_end(end_ - sizeof(BufferHeader)); /* start at offset (first segment) if we know it and it is valid */ if (offset >= 0) { assert(0 == (offset % scan_step)); if (start_ + offset + sizeof(BufferHeader) < segment_end) /* we know exaclty where the first segment starts */ segment_start = start_ + offset; else /* first segment is completely missing, advance scan count */ segment_scans = 1; } recover_progress_callback scan_progress_callback(pcb_); gu::Progress progress(&scan_progress_callback, "GCache::RingBuffer initial scan", " bytes", end_ - start_, 1<<22/*4Mb*/); while (segment_scans < 2) { segment_scans++; ptr = segment_start; bh = BH_cast(ptr); #define GCACHE_SCAN_BUFFER_TEST \ (BH_test(bh) && bh->size > 0 && \ ptr + bh->size <= segment_end && \ BH_test(BH_cast(ptr + bh->size))) #define GCACHE_SCAN_ADVANCE(amount) \ ptr += amount; \ progress.update(amount); \ bh = BH_cast(ptr); while (GCACHE_SCAN_BUFFER_TEST) { assert((uintptr_t(bh) % scan_step) == 0); bh->flags |= 
BUFFER_RELEASED; bh->ctx = uint64_t(this); seqno_t const seqno_g(bh->seqno_g); if (gu_likely(seqno_g > 0)) { bool const collision( seqno_g <= seqno_max && seqno_g >= seqno2ptr_.index_begin() && seqno2ptr_[seqno_g] != seqno2ptr_t::null_value()); if (gu_unlikely(collision)) { collision_count++; /* compare two buffers */ seqno2ptr_t::const_reference old_ptr (seqno2ptr_[seqno_g]); BufferHeader* const old_bh (old_ptr ? ptr2BH(old_ptr) : NULL); bool const same_meta(NULL != old_bh && bh->seqno_g == old_bh->seqno_g && bh->size == old_bh->size && bh->flags == old_bh->flags); const void* const new_ptr(static_cast(bh+1)); uint8_t cs_old[16] = { 0, }; uint8_t cs_new[16] = { 0, }; if (same_meta) { gu_fast_hash128(old_ptr, old_bh->size - sizeof(BufferHeader), cs_old); gu_fast_hash128(new_ptr, bh->size - sizeof(BufferHeader), cs_new); } bool const same_data(same_meta && !::memcmp(cs_old, cs_new, sizeof(cs_old))); std::ostringstream msg; msg << "Attempt (" << collision_count << ") to reuse the same seqno: " << seqno_g << ". New ptr = " << new_ptr << ", " << bh << ", cs: " << gu::Hexdump(cs_new, sizeof(cs_new)) << ", previous ptr = " << old_ptr; empty_buffer(bh); // this buffer is unusable assert(BH_is_released(bh)); if (old_bh != NULL) { msg << ", " << old_bh << ", cs: " << gu::Hexdump(cs_old,sizeof(cs_old)); if (!same_data) // no way to choose which is correct { empty_buffer(old_bh); assert(BH_is_released(old_bh)); if (erase_up_to < seqno_g) erase_up_to =seqno_g; } } log_info << msg.str(); if (same_data) { log_info << "Contents are the same, discarding " << new_ptr; } else { log_info << "Contents differ. Discarding both."; } } else { try { seqno2ptr_.insert(seqno_g, bh + 1); } catch (std::exception& e) { seqno_t const sb(seqno2ptr_.empty() ? SEQNO_ILL : seqno2ptr_.index_begin()); seqno_t const se(seqno2ptr_.empty() ? SEQNO_ILL : seqno2ptr_.index_end()); log_warn << "Exception while mapping writeset " << bh << " into [" << sb << ", " << se << "): '" << e.what() << "'. 
Aborting GCache recovery."; /* Buffer scanning was interrupted ungracefully - * this means that we failed to recover the most * recent writesets. As such anything that was * potentially recovered before is useless. * This will cause full cache reset in recover() */ seqno2ptr_.clear(SEQNO_ILL); BH_clear(bh); // to pacify assert() below next_ = ptr; goto out; } seqno_max = std::max(seqno_g, seqno_max); } } GCACHE_SCAN_ADVANCE(bh->size); } if (!BH_is_clear(bh)) { if (start_ == segment_start && ptr != first_ && ptr + bh->size != first_) /* ptr + bh->size == first_ means that there is only one * segment starting at first_ and the space between start_ * and first_ occupied by discarded buffers. */ { log_warn << "Failed to scan the last segment to the end. " "Last events may be missing. Last recovered event: " << gid_ << ':' << seqno_max; } /* end of segment, close it */ BH_clear(bh); } if (offset > 0 && segment_start == start_ + offset) { /* started with the first segment, jump to the second one */ assert(1 == segment_scans); first_ = segment_start; size_trail_ = end_ - ptr; // there must be at least one buffer header between the segments segment_end = segment_start - sizeof(BufferHeader); segment_start = start_; } else if (offset < 0 && segment_start == start_) { /* started with the second segment, try to find the first one */ assert(1 == segment_scans); next_ = ptr; GCACHE_SCAN_ADVANCE(sizeof(BufferHeader)); while (ptr + sizeof(BufferHeader) < end_ && !GCACHE_SCAN_BUFFER_TEST) { GCACHE_SCAN_ADVANCE(scan_step); } if (GCACHE_SCAN_BUFFER_TEST) { /* looks like a valid buffer, a beginning of a segment */ segment_start = ptr; first_ = segment_start; } else if (ptr + sizeof(BufferHeader) >= end_) { /* perhaps it was a single segment starting at start_ */ first_ = start_; break; } else { assert(0); } } else if (offset == 0 && segment_start == start_) { /* single segment case */ assert(1 == segment_scans); first_ = segment_start; next_ = ptr; break; } else { assert(2 == 
segment_scans); assert(offset != 0); if (offset >= 0) next_ = ptr; /* end of the second segment */ assert(first_ >= start_ && first_ < end_); assert(next_ >= start_ && next_ < end_); if (offset < 0 && segment_start > start_) { /* first (end) segment was scanned last, estimate trail */ size_trail_ = end_ - ptr; } else if (offset > 0 && next_ > first_) { size_trail_ = 0; } } #undef GCACHE_SCAN_BUFFER_TEST #undef GCACHE_SCAN_ADVANCE } // while (segment_scans < 2) out: assert(BH_is_clear(BH_cast(next_))); progress.finish(); if (debug_) { log_info << "RB: end of scan(): seqno2ptr: " << seqno2ptr_.index_begin() << " - " << seqno2ptr_.index_end() << ", seqno_max: " << seqno_max; log_info << "RB: " << *this; dump_map(); } return erase_up_to; } static bool assert_ptr_seqno(seqno2ptr_t& map, const void* const ptr, seqno_t const seqno) { const BufferHeader* const bh(ptr2BH(ptr)); if (bh->seqno_g != seqno) { assert(0); map.clear(SEQNO_NONE); return true; } return false; } void RingBuffer::recover(off_t const offset, int version) { static const char* const diag_prefix ="Recovering GCache ring buffer: "; /* scan the buffer and populate seqno2ptr map */ seqno_t const lowest(scan(offset, version > 0 ? MemOps::ALIGNMENT : 1) + 1); /* lowest is the lowest valid seqno based on collisions during scan */ if (!seqno2ptr_.empty()) { assert(next_ <= first_ || size_trail_ == 0); assert(next_ > first_ || size_trail_ > 0); /* find the last gapless seqno sequence */ seqno2ptr_t::reverse_iterator r(seqno2ptr_.rbegin()); assert(*r); seqno_t const seqno_max(seqno2ptr_.index_back()); seqno_t seqno_min(seqno2ptr_.index_front()); /* need to search for seqno gaps */ assert(seqno_max >= lowest); if (lowest == seqno_max) { seqno2ptr_.clear(SEQNO_NONE); goto full_reset; } seqno_min = seqno_max; if (assert_ptr_seqno(seqno2ptr_, *r, seqno_min)) goto full_reset; /* At this point r and seqno_min both point at the last element in * the map. 
Scan downwards and bail out on the first hole.*/ ++r; for (; r != seqno2ptr_.rend() && *r && seqno_min > lowest; ++r) { --seqno_min; if (assert_ptr_seqno(seqno2ptr_, *r,seqno_min)) goto full_reset; } /* At this point r points to one below seqno_min */ log_info << diag_prefix << "found gapless sequence " << seqno_min << '-' << seqno_max; if (r != seqno2ptr_.rend()) { assert(seqno_min > seqno2ptr_.index_begin()); log_info << diag_prefix << "discarding seqnos " << seqno2ptr_.index_begin() << '-' << seqno_min - 1; /* clear up seqno2ptr map */ for (; r != seqno2ptr_.rend(); ++r) { if (*r) empty_buffer(ptr2BH(*r)); } seqno2ptr_.erase(seqno2ptr_.begin(),seqno2ptr_.find(seqno_min)); } assert(seqno2ptr_.size() > 0); /* trim first_: start with the current first_ and scan forward to * the first non-empty buffer. */ BufferHeader* bh(BH_cast(first_)); assert(bh->size > sizeof(BufferHeader)); while (bh->seqno_g == SEQNO_ILL) { assert(bh->size > sizeof(BufferHeader)); bh = BH_next(bh); if (gu_unlikely(0 == bh->size)) bh = BH_cast(start_);// rollover } first_ = reinterpret_cast(bh); /* trim next_: start with the last seqno and scan forward up to the * current next_. Update to the end of the last non-empty buffer. 
*/ bh = ptr2BH(seqno2ptr_.back()); BufferHeader* last_bh(bh); while (bh != BH_cast(next_)) { if (gu_likely(bh->size) > 0) { bool const inconsistency( BH_next(bh) > BH_cast(end_ - sizeof(BufferHeader)) || bh->ctx != BH_ctx_t(this) ); if (gu_unlikely(inconsistency)) { assert(0); log_warn << diag_prefix << "Corrupt buffer leak1: "<size > sizeof(BufferHeader)); if (bh->seqno_g > 0) last_bh = bh; bh = BH_next(bh); } else { bh = BH_cast(start_); // rollover } } next_ = reinterpret_cast(BH_next(last_bh)); /* Even if previous buffers were not aligned, make sure from * now on they are - adjust next_ pointer and last buffer size */ if (uintptr_t(next_) % MemOps::ALIGNMENT) { uint8_t* const n(MemOps::align_ptr(next_)); assert(n > next_); size_type const size_diff(n - next_); assert(size_diff < MemOps::ALIGNMENT); assert(last_bh->size > 0); last_bh->size += size_diff; next_ = n; assert(BH_next(last_bh) == BH_cast(next_)); } assert((uintptr_t(next_) % MemOps::ALIGNMENT) == 0); BH_clear(BH_cast(next_)); /* at this point we must have at least one seqno'd buffer */ assert(next_ != first_); /* as a result of trimming, trailing space may be gone */ if (first_ < next_) size_trail_ = 0; else assert(size_trail_ >= sizeof(BufferHeader)); estimate_space(); /* now discard all the locked-in buffers (see seqno_reset()) */ size_t total(0); size_t locked(0); { recover_progress_callback unused_progress_callback(pcb_); gu::Progress progress( &unused_progress_callback, "GCache::RingBuffer unused buffers scan", " bytes", size_used_, 1<<22 /* 4Mb */); bh = BH_cast(first_); while (bh != BH_cast(next_)) { if (gu_likely(bh->size > 0)) { bool const inconsistency( BH_next(bh) > BH_cast(end_ - sizeof(BufferHeader)) || bh->ctx != BH_ctx_t(this) ); if (gu_unlikely(inconsistency)) { assert(0); log_warn << diag_prefix << "Corrupt buffer leak2: " << bh; goto full_reset; } total++; if (gu_likely(bh->seqno_g > 0)) { free(bh); // on recovery no buffer is used } else { /* anything that is not ordered must be 
discarded */ assert(SEQNO_NONE == bh->seqno_g || SEQNO_ILL == bh->seqno_g); locked++; empty_buffer(bh); discard(bh); size_used_ -= bh->size; // size_free_ is taken care of in discard() } bh = BH_next(bh); } else { bh = BH_cast(start_); // rollover } progress.update(bh->size); } progress.finish(); } /* No buffers on recovery should be in used state */ assert(0 == size_used_); log_info << diag_prefix << "found " << locked << '/' << total << " locked buffers"; log_info << diag_prefix << "free space: " << size_free_ << '/' << size_cache_; assert_sizes(); if (debug_) { log_info << *this; dump_map(); } } else { full_reset: log_info << diag_prefix <<"Recovery failed, need to do full reset."; reset(); } } static void print_chain(const uint8_t* const rb_start, const uint8_t* const chain_start, const uint8_t* const chain_end, size_t const count, const char* const type_str) { ptrdiff_t const start_off(chain_start - rb_start); ptrdiff_t const end_off(chain_end - rb_start); std::cerr << start_off << "\t" << end_off << "\t" << end_off - start_off << "\t" << count << "\t" << type_str << std::endl; } void RingBuffer::dump_map() const { enum chain_t { ORDERED, UNORDERED, RELEASED, NONE }; static const char* chain_str[] = { "ORDERED", "UNORDERED", "RELEASED", "NONE" }; size_t chain_size[] = { 0, 0, 0, 0 }; size_t chain_count[] = { 0, 0, 0, 0 }; chain_t chain(NONE); const uint8_t* chain_start(start_); size_t count; bool next(false); const uint8_t* ptr(start_); const BufferHeader* bh(BH_const_cast(ptr)); log_info << "RB start_"; log_info << bh; for (int i(0); i < 2; i++) { while (!BH_is_clear(bh)) { if (first_ == ptr && i == 0) { goto first; // rare situation when there is only // one segment in the start/middle } size_t const offset(bh->size); chain_t const typ(bh->seqno_g >= 0 ? 
ORDERED : UNORDERED); if (chain != typ) { // new chain starts if (chain != NONE) { // old chain ends print_chain(start_, chain_start, ptr, count, chain_str[chain]); chain_count[chain] += count; } chain = typ; chain_start = ptr; count = 0; } count++; chain_size[typ] += offset; chain_size[RELEASED] += offset * BH_is_released(bh); chain_count[RELEASED] += BH_is_released(bh); ptr += offset; bh = BH_const_cast(ptr); } // old chain ends print_chain(start_, chain_start, ptr, count, chain_str[chain]); chain_count[chain] += count; if (1 == i) break; // final segment read log_info << "RB next_"; log_info << bh << ", off: " << ptr - start_; next = true; log_info << "RB middle gap: " << first_ - ptr; ptr = first_; bh = BH_const_cast(ptr); first: chain = NONE; count = 0; log_info << "RB first_"; log_info << bh << ", off: " << ptr - start_; } if (!next) { log_info << "RB next_"; } else { log_info << "RB rollover"; } log_info << bh << ", off: " << ptr - start_; log_info << "RB trailing space: " << end_ - ptr; log_info << "RB space usage:" << "\nORDERED : " << chain_size[ORDERED] << "\nUNORDERED: " << chain_size[UNORDERED] << "\nRELEASED : " << chain_size[RELEASED] << "\nNONE : " << chain_size[NONE]; log_info << "RB buf counts:" << "\nORDERED : " << chain_count[ORDERED] << "\nUNORDERED: " << chain_count[UNORDERED] << "\nRELEASED : " << chain_count[RELEASED] << "\nNONE : " << chain_count[NONE]; } } /* namespace gcache */ galera-4-26.4.25/gcache/src/gcache_rb_store.hpp000644 000164 177776 00000014314 15107057155 022342 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ /*! 
@file ring buffer storage class */ #ifndef _gcache_rb_store_hpp_ #define _gcache_rb_store_hpp_ #include "gcache_memops.hpp" #include "gcache_bh.hpp" #include "gcache_types.hpp" #include #include #include #include namespace gcache { class RingBuffer : public MemOps { public: RingBuffer (ProgressCallback* pcb, const std::string& name, size_t size, seqno2ptr_t& seqno2ptr, gu::UUID& gid, int dbg, bool recover); ~RingBuffer () noexcept(false); void* malloc (size_type size); void* realloc (void* ptr, size_type size); void free (BufferHeader* bh); void repossess(BufferHeader* bh) { assert(bh->size > 0); assert(bh->seqno_g != SEQNO_NONE); assert(bh->store == BUFFER_IN_RB); assert(bh->ctx == reinterpret_cast(this)); assert(BH_is_released(bh)); // will be marked unreleased by caller size_used_ += bh->size; assert(size_used_ <= size_cache_); } void discard (BufferHeader* const bh) { assert (BH_is_released(bh)); assert (BUFFER_IN_RB == bh->store); size_free_ += bh->size; assert (size_free_ <= size_cache_); bh->seqno_g = SEQNO_ILL; } size_t size () const { return size_cache_; } size_t rb_size () const { return fd_.size(); } const std::string& rb_name() const { return fd_.name(); } void reset(); void seqno_lock(seqno_t const seqno_g) { seqno_locked_ = seqno_g; } void seqno_unlock() { seqno_locked_ = SEQNO_MAX; } void seqno_reset(); /* returns true when successfully discards all seqnos in range */ bool discard_seqnos(seqno2ptr_t::iterator i_begin, seqno2ptr_t::iterator i_end); /* returns true when successfully discards all seqnos up to s */ bool discard_seqno(seqno_t s) { return discard_seqnos(seqno2ptr_.begin(), seqno2ptr_.find(s + 1)); } void print (std::ostream& os) const; static size_t pad_size() { RingBuffer* rb(0); // cppcheck-suppress nullPointer return (PREAMBLE_LEN * sizeof(*(rb->preamble_)) + // cppcheck-suppress nullPointer HEADER_LEN * sizeof(*(rb->header_))); } void assert_size_free() const { #ifndef NDEBUG if (next_ >= first_) { /* start_ first_ next_ end_ * | 
|###########| | */ assert(size_free_ >= (size_cache_ - (next_ - first_))); } else { /* start_ next_ first_ end_ * |#######| |#####| | */ assert(size_free_ >= size_t(first_ - next_)); } assert (size_free_ <= size_cache_); #endif } void assert_size_trail() const { #ifndef NDEBUG if (next_ >= first_) assert(0 == size_trail_); else assert(size_trail_ >= sizeof(BufferHeader)); #endif } void assert_sizes() const { assert_size_trail(); assert_size_free(); } void set_debug(int const dbg) { debug_ = dbg & DEBUG; } #ifdef GCACHE_RB_UNIT_TEST ptrdiff_t offset(const void* const ptr) const { return static_cast(ptr) - start_; } #endif void dump_map() const; private: static size_t const PREAMBLE_LEN = 1024; static size_t const HEADER_LEN = 32; // 0 - undetermined version // 1 - initial version, no buffer alignment // 2 - buffer alignemnt to GU_WORD_BYTES static int const VERSION = 2; static int const DEBUG = 2; // debug flag ProgressCallback* pcb_; gu::FileDescriptor fd_; gu::MMap mmap_; char* const preamble_; // ASCII text preamble int64_t* const header_; // cache binary header uint8_t* const start_; // start of cache area uint8_t* const end_; // first byte after cache area uint8_t* first_; // pointer to the first (oldest) buffer uint8_t* next_; // pointer to the next free space seqno2ptr_t& seqno2ptr_; gu::UUID& gid_; seqno_t seqno_locked_; size_t const size_cache_; size_t size_free_; size_t size_used_; size_t size_trail_; int debug_; bool open_; BufferHeader* get_new_buffer (size_type size); void constructor_common(); /* preamble fields */ static std::string const PR_KEY_VERSION; static std::string const PR_KEY_GID; static std::string const PR_KEY_SEQNO_MAX; static std::string const PR_KEY_SEQNO_MIN; static std::string const PR_KEY_OFFSET; static std::string const PR_KEY_SYNCED; void write_preamble(bool synced); void open_preamble(bool recover); void close_preamble(); // returns lower bound (not inclusive) of valid seqno range seqno_t scan(off_t offset, int scan_step); void 
recover(off_t offset, int version); void estimate_space(); RingBuffer(const gcache::RingBuffer&); RingBuffer& operator=(const gcache::RingBuffer&); #ifdef GCACHE_RB_UNIT_TEST public: uint8_t* start() const { return start_; } #endif }; inline std::ostream& operator<< (std::ostream& os, const RingBuffer& rb) { rb.print(os); return os; } } /* namespace gcache */ #endif /* _gcache_rb_store_hpp_ */ galera-4-26.4.25/gcache/src/GCache_memops.cpp000644 000164 177776 00000016464 15107057155 021726 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #include "GCache.hpp" #include #include "gu_logger.hpp" namespace gcache { void GCache::discard_buffer (BufferHeader* bh) { bh->seqno_g = SEQNO_ILL; // will never be reused switch (bh->store) { case BUFFER_IN_MEM: mem.discard (bh); break; case BUFFER_IN_RB: rb.discard (bh); break; case BUFFER_IN_PAGE: ps.discard (bh); break; default: log_fatal << "Corrupt buffer header: " << bh; abort(); } } bool GCache::discard_seqno (seqno_t seqno) { #ifndef NDEBUG seqno_t const begin(params.debug() ? (seqno2ptr.empty() ? 
SEQNO_NONE : seqno2ptr.index_begin()) : SEQNO_NONE); if (params.debug()) { assert(begin > 0); log_info << "GCache::discard_seqno(" << begin << " - " << seqno << ")"; } #endif /* if we can't complete the operation, let's not even start */ if (seqno >= seqno_locked) { #ifndef NDEBUG if (params.debug()) { log_info << "GCache::discard_seqno(" << begin << " - " << seqno << "): " << seqno_locked << " is locked, bailing out."; } #endif return false; } while (seqno2ptr.index_begin() <= seqno && !seqno2ptr.empty()) { BufferHeader* const bh(ptr2BH(seqno2ptr.front())); if (gu_likely(BH_is_released(bh))) { assert (bh->seqno_g == seqno2ptr.index_begin()); assert (bh->seqno_g <= seqno); discard_buffer(bh); } else { #ifndef NDEBUG if (params.debug()) { log_info << "GCache::discard_seqno(" << begin << " - " << seqno << "): " << bh->seqno_g << " not released, bailing out."; } #endif return false; } seqno2ptr.pop_front(); } return true; } void GCache::discard_tail (seqno_t const seqno) { while (seqno2ptr.index_back() > seqno && !seqno2ptr.empty()) { BufferHeader* bh(ptr2BH(seqno2ptr.back())); assert(BH_is_released(bh)); assert(bh->seqno_g == seqno2ptr.index_back()); discard_buffer(bh); seqno2ptr.pop_back(); } } void* GCache::malloc (ssize_type const s) { assert(s >= 0); void* ptr(NULL); if (gu_likely(s > 0)) { size_type const size(MemOps::align_size(s + sizeof(BufferHeader))); gu::Lock lock(mtx); mallocs++; ptr = mem.malloc(size); if (0 == ptr) ptr = rb.malloc(size); if (0 == ptr) ptr = ps.malloc(size); #ifndef NDEBUG if (0 != ptr) buf_tracker.insert (ptr); #endif } assert((uintptr_t(ptr) % MemOps::ALIGNMENT) == 0); return ptr; } void GCache::free_common (BufferHeader* const bh) { assert(bh->seqno_g != SEQNO_ILL); BH_release(bh); seqno_t new_released(seqno_released); if (gu_likely(SEQNO_NONE != bh->seqno_g)) { #ifndef NDEBUG if (!(seqno_released + 1 == bh->seqno_g || SEQNO_NONE == seqno_released)) { log_fatal << "OOO release: seqno_released " << seqno_released << ", releasing " << 
bh->seqno_g; assert(0); } #endif new_released = bh->seqno_g; } #ifndef NDEBUG void* const ptr(bh + 1); std::set::iterator it = buf_tracker.find(ptr); if (it == buf_tracker.end()) { log_fatal << "Have not allocated this ptr: " << ptr; abort(); } buf_tracker.erase(it); #endif frees++; switch (bh->store) { case BUFFER_IN_MEM: mem.free (bh); break; case BUFFER_IN_RB: rb.free (bh); break; case BUFFER_IN_PAGE: ps.free (bh); break; default: log_fatal << "Memory corruption: unrecognized store: " << bh->store; abort(); } rb.assert_size_free(); seqno_released = new_released; } void GCache::free (void* ptr) { if (gu_likely(0 != ptr)) { BufferHeader* const bh(ptr2BH(ptr)); /* free() should not be used on ordered buffers, * GCache::seqno_release() should be used instead */ assert(bh->seqno_g <= 0); try { gu::Lock lock(mtx); #ifndef NDEBUG if (params.debug()) { log_info << "GCache::free() " << bh; } seqno_t const old_sr(seqno_released); #endif free_common (bh); #ifndef NDEBUG if (params.debug()) { log_info << "GCache::free() seqno_released: " << old_sr << " -> " << seqno_released; } #endif } catch(gu::Exception& e) { gu_error("GCache::free() caught exception %s.", e.what()); gu_abort(); } } else { log_warn << "Attempt to free a null pointer"; assert(0); } } void* GCache::realloc (void* const ptr, ssize_type const s) { assert(s >= 0); if (NULL == ptr) { return malloc(s); } else if (s == 0) { free (ptr); return NULL; } assert((uintptr_t(ptr) % MemOps::ALIGNMENT) == 0); size_type const size(MemOps::align_size(s + sizeof(BufferHeader))); void* new_ptr(NULL); BufferHeader* const bh(ptr2BH(ptr)); if (gu_unlikely(bh->seqno_g > 0)) // sanity check { log_fatal << "Internal program error: changing size of an ordered" << " buffer, seqno: " << bh->seqno_g << ". 
Aborting."; abort(); } gu::Lock lock(mtx); reallocs++; MemOps* store(0); switch (bh->store) { case BUFFER_IN_MEM: store = &mem; break; case BUFFER_IN_RB: store = &rb; break; case BUFFER_IN_PAGE: store = &ps; break; default: log_fatal << "Memory corruption: unrecognized store: " << bh->store; abort(); } new_ptr = store->realloc (ptr, size); if (0 == new_ptr) { new_ptr = malloc (size); if (0 != new_ptr) { memcpy (new_ptr, ptr, bh->size - sizeof(BufferHeader)); store->free (bh); } } #ifndef NDEBUG if (ptr != new_ptr && 0 != new_ptr) { std::set::iterator it = buf_tracker.find(ptr); if (it != buf_tracker.end()) buf_tracker.erase(it); it = buf_tracker.find(new_ptr); } #endif assert((uintptr_t(new_ptr) % MemOps::ALIGNMENT) == 0); return new_ptr; } } galera-4-26.4.25/gcache/src/gcache_mem_store.cpp000644 000164 177776 00000003321 15107057155 022504 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ #include "gcache_mem_store.hpp" #include "gcache_page_store.hpp" #include namespace gcache { bool MemStore::have_free_space (size_type size) { while ((size_ + size > max_size_) && !seqno2ptr_.empty()) { /* try to free some released bufs */ BufferHeader* const bh(ptr2BH(seqno2ptr_.front())); if (BH_is_released(bh) && bh->seqno_g < seqno_locked_) /*discard buffer*/ { seqno2ptr_.pop_front(); bh->seqno_g = SEQNO_ILL; switch (bh->store) { case BUFFER_IN_MEM: discard(bh); break; case BUFFER_IN_RB: BH_ctx(bh)->discard(bh); break; case BUFFER_IN_PAGE: { Page* const page (static_cast(BH_ctx(bh))); PageStore* const ps (PageStore::page_store(page)); ps->discard(bh); break; } default: log_fatal << "Corrupt buffer header: " << bh; abort(); } } else { break; } } return (size_ + size <= max_size_); } void MemStore::seqno_reset() { for (std::set::iterator buf(allocd_.begin()); buf != allocd_.end();) { std::set::iterator tmp(buf); ++buf; BufferHeader* const bh(*tmp); if (bh->seqno_g != SEQNO_NONE) { assert (BH_is_released(bh)); allocd_.erase (tmp); size_ -= 
bh->size; ::free (bh); } } } } /* namespace gcache */ galera-4-26.4.25/gcache/src/gcache_limits.hpp000644 000164 177776 00000002774 15107057155 022033 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015 Codership Oy */ #ifndef _gcache_limits_hpp_ #define _gcache_limits_hpp_ #include "gcache_bh.hpp" #include // GU_COMPILE_ASSERT #include namespace gcache { class Limits { public: typedef MemOps::size_type size_type; typedef MemOps::ssize_type ssize_type; static ssize_type const SSIZE_MAX_ = (1ULL << (sizeof(ssize_type)*8 - 1)) - 1; static size_type const MAX_SIZE = sizeof(BufferHeader) + SSIZE_MAX_; static size_type const MIN_SIZE = sizeof(BufferHeader) + 1; static inline void assert_size(unsigned long long s) { #ifndef NDEBUG assert(s <= MAX_SIZE); assert(s >= MIN_SIZE); #endif /* NDEBUG */ } private: /* the difference between MAX_SIZE and MIN_SIZE should never exceed * diff_type capacity */ GU_COMPILE_ASSERT(MAX_SIZE > MIN_SIZE, max_min); typedef MemOps::diff_type diff_type; static diff_type const DIFF_MAX = (1ULL << (sizeof(diff_type)*8 - 1)) - 1; GU_COMPILE_ASSERT(DIFF_MAX >= 0, diff_max); GU_COMPILE_ASSERT(size_type(DIFF_MAX) >= MAX_SIZE - MIN_SIZE, max_diff); static diff_type const DIFF_MIN = -DIFF_MAX - 1; typedef long long long_long; GU_COMPILE_ASSERT(DIFF_MIN < 0, diff_min); GU_COMPILE_ASSERT(DIFF_MIN + MAX_SIZE <= MIN_SIZE, min_diff); }; /* class Limits */ } #endif /* _gcache_limits_hpp_ */ galera-4-26.4.25/gcache/src/gcache.h000644 000164 177776 00000001260 15107057155 020077 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2014 Codership Oy */ /*! * @file C-interface to GCache. 
*/ #ifndef _gcache_h_ #define _gcache_h_ #include #ifdef __cplusplus extern "C" { #endif #include "gu_config.h" typedef struct gcache_st gcache_t; extern gcache_t* gcache_create (gu_config_t* conf, const char* data_dir); extern void gcache_destroy (gcache_t* gc); extern void* gcache_malloc (gcache_t* gc, int size); extern void gcache_free (gcache_t* gc, const void* ptr); extern void* gcache_realloc (gcache_t* gc, void* ptr, int size); extern int64_t gcache_seqno_min (gcache_t* gc); #ifdef __cplusplus } #endif #endif /* _gcache_h_ */ galera-4-26.4.25/gcache/src/GCache_seqno.cpp000644 000164 177776 00000032036 15107057155 021544 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #include "gcache_bh.hpp" #include "GCache.hpp" #include #include namespace gcache { /*! * Reinitialize seqno sequence (after SST or such) * Clears seqno->ptr map // and sets seqno_min to gtid seqno */ void GCache::seqno_reset (const gu::GTID& gtid) { gu::Lock lock(mtx); assert(seqno2ptr.empty() || seqno_max == seqno2ptr.index_back()); const seqno_t s(gtid.seqno()); if (gtid.uuid() == gid && s != SEQNO_ILL && seqno_max >= s) { if (seqno_max > s) { discard_tail(s); seqno_max = s; seqno_released = s; assert(seqno2ptr.empty() || seqno_max == seqno2ptr.index_back()); } return; } log_info << "GCache history reset: " << gu::GTID(gid, seqno_max) << " -> " << gtid; seqno_released = SEQNO_NONE; gid = gtid.uuid(); /* order is significant here */ rb.seqno_reset(); mem.seqno_reset(); seqno2ptr.clear(SEQNO_NONE); seqno_max = SEQNO_NONE; } /*! 
* Assign sequence number to buffer pointed to by ptr */ void GCache::seqno_assign (const void* const ptr, seqno_t const seqno_g, uint8_t const type, bool const skip) { gu::Lock lock(mtx); BufferHeader* bh = ptr2BH(ptr); assert (SEQNO_NONE == bh->seqno_g); assert (seqno_g > 0); assert (!BH_is_released(bh)); if (gu_likely(seqno_g > seqno_max)) { seqno_max = seqno_g; } else { seqno2ptr_iter_t const i(seqno2ptr.find(seqno_g)); if (i != seqno2ptr.end()) { const void* const prev_ptr(*i); if (!seqno2ptr_t::not_set(prev_ptr)) { const BufferHeader* const prev_bh(ptr2BH(prev_ptr)); assert(0); gu_throw_fatal << "Attempt to reuse the same seqno: " << seqno_g <<". New buffer: " << bh << ", previous buffer: " << prev_bh; } } seqno_released = std::min(seqno_released, seqno_g - 1); } seqno2ptr.insert(seqno_g, ptr); bh->seqno_g = seqno_g; bh->flags |= (BUFFER_SKIPPED * skip); bh->type = type; if (BUFFER_IN_PAGE == bh->store) ps.seqno_assign(bh, seqno_g); } /*! * Mark buffer to be skipped */ void GCache::seqno_skip (const void* const ptr, seqno_t const seqno_g, uint8_t const type) { gu::Lock lock(mtx); BufferHeader* const bh(ptr2BH(ptr)); seqno2ptr_iter_t p = seqno2ptr.find(seqno_g); /* sanity checks */ int reason(0); std::ostringstream msg; if (seqno_g <= 0) { msg << "invalid seqno: " << seqno_g; reason = 1; } else if (seqno_g != bh->seqno_g) { msg << "seqno " << seqno_g << " does not match ptr seqno " << bh->seqno_g; reason = 2; } else if (type != bh->type) { msg << "type " << type << " does not match ptr type " << bh->type; reason = 3; } else if (p == seqno2ptr.end()) { msg << "seqno " << seqno_g << " not found in the map"; reason = 4; } else if (ptr != *p) { msg << "ptr " << seqno_g << " does not match mapped ptr " << *p; reason = 5; } assert(0 == reason); if (0 != reason) { gu_throw_fatal << "Skipping seqno sanity check failed: " << msg.str() << " (reason " << reason << ")"; } assert (!BH_is_released(bh)); assert (!BH_is_skipped(bh)); bh->flags |= BUFFER_SKIPPED; } void 
GCache::seqno_release (seqno_t const seqno) { assert (seqno > 0); /* The number of buffers scheduled for release is unpredictable, so * we want to allow some concurrency in cache access by releasing * buffers in small batches */ static int const min_batch_size(32); /* Although extremely unlikely, theoretically concurrent access may * lead to elements being added faster than released. The following is * to control and possibly disable concurrency in that case. We start * with min_batch_size and increase it if necessary. */ size_t old_gap(-1); int batch_size(min_batch_size); bool loop(true); while(loop) { gu::Lock lock(mtx); #ifndef NDEBUG if (seqno < seqno_released || seqno >= seqno_locked) { if (params.debug()) { log_info << "GCache::seqno_release(" << seqno << "): seqno_released: " << seqno_released << ", seqno_locked: " << seqno_locked; } } #endif seqno_t idx(seqno2ptr.upper_bound(seqno_released)); if (gu_unlikely(idx == seqno2ptr.index_end())) { /* This means that there are no elements with * seqno following seqno_released - and this should not * generally happen. But it looks like stopcont test does it. */ if (SEQNO_NONE != seqno_released) { log_debug << "Releasing seqno " << seqno << " before " << seqno_released + 1 << " was assigned."; } return; } assert(seqno_max >= seqno_released); /* here we check if (seqno_max - seqno_released) is decreasing * and if not - increase the batch_size (linearly) */ size_t const new_gap(seqno_max - seqno_released); batch_size += (new_gap >= old_gap) * min_batch_size; old_gap = new_gap; seqno_t const start (idx - 1); seqno_t const end (seqno - start >= 2*batch_size ? 
start + batch_size : seqno); #ifndef NDEBUG if (params.debug()) { log_info << "GCache::seqno_release(" << seqno << "): " << (seqno - start) << " buffers, batch_size: " << batch_size << ", end: " << end; } seqno_t const old_sr(seqno_released); #endif while((loop = (idx < seqno2ptr.index_end())) && idx <= end) { assert(idx != SEQNO_NONE); BufferHeader* const bh(ptr2BH(seqno2ptr[idx])); assert (bh->seqno_g == idx); #ifndef NDEBUG if (!(seqno_released + 1 == idx || seqno_released == SEQNO_NONE)) { log_info << "seqno_released: " << seqno_released << "; seqno_locked: " << seqno_locked << "; idx: " << idx << "; seqno2ptr.begin: " < " << seqno_released; } #endif } } void GCache::seqno_discard(const seqno_t& seqno) { #ifndef NDEBUG if (params.debug()) { log_info << "GCache::seqno_discard(" << seqno << ") index_begin: " << seqno2ptr.index_begin(); } #endif /* Gradually discard seqnos in reasonsbly sized batches */ while (true) { static int const batch_size(1024); gu::Lock lock(mtx); assert(seqno <= seqno_low_); if (seqno2ptr.empty()) break; seqno_t const s_min(seqno2ptr.index_begin()); if (s_min > seqno) break; // can't discard unassigned seqno assert(seqno_low_ <= seqno2ptr.index_end()); seqno_t const s_max(std::min(s_min + batch_size, seqno)); discard_seqno(s_max); } } /*! * Move lock to a given seqno. Throw gu::NotFound if seqno is not in * cache. * @throws NotFound */ void GCache::seqno_lock (seqno_t const seqno_g) { gu::Lock lock(mtx); assert(seqno_g > 0); assert(SEQNO_MAX == seqno_locked || seqno_locked_count > 0); assert(0 == seqno_locked_count || seqno_locked < SEQNO_MAX); if (seqno_g <= seqno_low_) throw gu::NotFound(); seqno2ptr.at(seqno_g); /* check that the element exists */ seqno_locked_count++; if (seqno_g < seqno_locked) { seqno_locked = seqno_g; mem.seqno_lock(seqno_locked); rb.seqno_lock(seqno_locked); ps.seqno_lock(seqno_locked); } } /*! * Get pointer to buffer identified by seqno. 
* Repossesses the buffer if it was already released, preventing its * (and following buffers) discarding. * @throws NotFound */ const void* GCache::seqno_get_ptr (seqno_t const seqno_g, ssize_t& size) { gu::Lock lock(mtx); const void* const ptr(seqno2ptr.at(seqno_g)); assert (ptr); BufferHeader* const bh(ptr2BH(ptr)); assert(seqno_g == bh->seqno_g); if (BH_is_released(bh)) // repossess and revert the effects of free() { #ifndef NDEBUG buf_tracker.insert(ptr); #endif seqno_released = std::min(seqno_released, bh->seqno_g - 1); mallocs++; // to match the resulting frees count // notify store switch (bh->store) { case BUFFER_IN_MEM: mem.repossess(bh); break; case BUFFER_IN_RB: rb.repossess (bh); break; case BUFFER_IN_PAGE: ps.repossess (bh); break; default: assert(0); } bh->flags &= ~BUFFER_RELEASED; // clear released flag } size = bh->size - sizeof(BufferHeader); return ptr; } size_t GCache::seqno_get_buffers (std::vector& v, seqno_t const start) { size_t const max(v.size()); assert (max > 0); size_t found(0); { gu::Lock lock(mtx); assert(seqno_locked <= start); // the caller should have locked the range first seqno2ptr_iter_t p = seqno2ptr.find(start); if (p != seqno2ptr.end() && *p) { do { assert(seqno2ptr.index(p) == seqno_t(start + found)); assert(*p); v[found].set_ptr(*p); } while (++found < max && ++p != seqno2ptr.end() && *p); /* the last condition ensures seqno continuty, #643 */ } } // the following may cause IO for (size_t i(0); i < found; ++i) { const BufferHeader* const bh (ptr2BH(v[i].ptr())); assert (bh->seqno_g == seqno_t(start + i)); Limits::assert_size(bh->size); v[i].set_other (bh->seqno_g, bh->size - sizeof(BufferHeader), BH_is_skipped(bh), bh->type); } return found; } /*! * Releases any history locks present. 
*/ void GCache::seqno_unlock () { gu::Lock lock(mtx); if (seqno_locked_count > 0) { assert(seqno_locked < SEQNO_MAX); seqno_locked_count--; if (0 == seqno_locked_count) { seqno_locked = SEQNO_MAX; mem.seqno_unlock(); rb.seqno_unlock(); ps.seqno_unlock(); } } else { assert(SEQNO_MAX == seqno_locked); assert(0); // something wrong with the caller's logic seqno_locked = SEQNO_MAX; } } } galera-4-26.4.25/gcache/src/gcache_mem_store.hpp000644 000164 177776 00000010531 15107057155 022512 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ /*! @file mem store class */ #ifndef _gcache_mem_store_hpp_ #define _gcache_mem_store_hpp_ #include "gcache_memops.hpp" #include "gcache_bh.hpp" #include "gcache_types.hpp" #include "gcache_limits.hpp" #include #include namespace gcache { class MemStore : public MemOps { public: MemStore (size_t const max_size, seqno2ptr_t& seqno2ptr, int const dbg) : max_size_ (max_size), size_ (0), allocd_ (), seqno2ptr_(seqno2ptr), seqno_locked_(SEQNO_MAX), debug_ (dbg & DEBUG) {} void reset () { for (std::set::iterator bh(allocd_.begin()); bh != allocd_.end(); ++bh) { ::free (*bh); } allocd_.clear(); size_ = 0; } ~MemStore () { reset(); } void* malloc (size_type size) { Limits::assert_size(size); if (size > max_size_ || have_free_space(size) == false) return 0; assert (size_ + size <= max_size_); BufferHeader* bh (BH_cast (::malloc (size))); if (gu_likely(0 != bh)) { allocd_.insert(bh); bh->size = size; bh->seqno_g = SEQNO_NONE; bh->flags = 0; bh->store = BUFFER_IN_MEM; bh->ctx = reinterpret_cast(this); size_ += size; return (bh + 1); } return 0; } void free (BufferHeader* bh) { assert(bh->size > 0); assert(bh->size <= size_); assert(bh->store == BUFFER_IN_MEM); assert(bh->ctx == reinterpret_cast(this)); if (SEQNO_NONE == bh->seqno_g) discard (bh); } void repossess(BufferHeader* bh) { assert(bh->size > 0); assert(bh->seqno_g != SEQNO_NONE); assert(bh->store == BUFFER_IN_MEM); assert(bh->ctx == reinterpret_cast(this)); 
assert(BH_is_released(bh)); // will be marked unreleased by caller } void* realloc (void* ptr, size_type size) { if (!ptr) return malloc(size); BufferHeader* bh(ptr2BH(ptr)); assert (SEQNO_NONE == bh->seqno_g); size_type const old_size(bh->size); diff_type const diff_size(size - old_size); if (diff_size == 0) return ptr; if (size > max_size_ || have_free_space(diff_size) == false) return 0; assert (size_ + diff_size <= max_size_); BufferHeader* const orig(bh); bh = BH_cast(::realloc(bh, size)); if (bh != nullptr) { if (bh != orig) { allocd_.erase(orig); allocd_.insert(bh); } assert (bh->size == old_size); bh->size = size; size_ += diff_size; return (bh + 1); } else { assert(size > 0); /* orginal buffer is still allocated so we keep it in allocd_*/ } return 0; } void discard (BufferHeader* bh) { assert (BH_is_released(bh)); assert (bh->seqno_g < seqno_locked_); size_ -= bh->size; allocd_.erase(bh); ::free (bh); } void set_max_size (size_t size) { max_size_ = size; } void seqno_reset(); // for unit tests only size_t _allocd () const { return size_; } void set_debug(int const dbg) { debug_ = dbg & DEBUG; } void seqno_lock(seqno_t const seqno_g) { seqno_locked_ = seqno_g; } void seqno_unlock() { seqno_locked_ = SEQNO_MAX; } private: static int const DEBUG = 1; bool have_free_space (size_type size); size_t max_size_; size_t size_; std::set allocd_; seqno2ptr_t& seqno2ptr_; seqno_t seqno_locked_; int debug_; }; } #endif /* _gcache_mem_store_hpp_ */ galera-4-26.4.25/gcache/src/SConscript000644 000164 177776 00000001550 15107057155 020530 0ustar00jenkinsnogroup000000 000000 Import('env') gcache_env = env.Clone() # Include paths gcache_env.Append(CPPPATH = Split(''' #/common #/galerautils/src ''')) gcache_sources = Split (''' GCache_seqno.cpp gcache_params.cpp gcache_page.cpp gcache_page_store.cpp gcache_rb_store.cpp gcache_mem_store.cpp GCache_memops.cpp GCache.cpp ''') gcache_env.StaticLibrary('gcache', gcache_sources) test_env = gcache_env.Clone() 
test_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) test_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) test_env.Prepend(LIBS=File('libgcache.a')) test_env.Program(target = 'gcache_test', source = 'test.cpp') env.Append(LIBGALERA_OBJS = gcache_env.SharedObject(gcache_sources)) galera-4-26.4.25/gcache/src/gcache_bh.hpp000644 000164 177776 00000007701 15107057155 021116 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2018 Codership Oy * */ #ifndef __GCACHE_BUFHEAD__ #define __GCACHE_BUFHEAD__ #include "gcache_memops.hpp" #include "gcache_seqno.hpp" #include #include #include #include #include namespace gcache { static uint16_t const BUFFER_RELEASED = 1 << 0; static uint16_t const BUFFER_SKIPPED = 1 << 1; static uint16_t const BUFFER_FLAGS_MAX = (uint32_t(BUFFER_SKIPPED)<<1) - 1; enum StorageType { BUFFER_IN_MEM, BUFFER_IN_RB, BUFFER_IN_PAGE }; typedef uint64_t BH_ctx_t; struct BufferHeader { int64_t seqno_g; BH_ctx_t ctx; uint32_t size; /*! total buffer size, including header */ uint16_t flags; int8_t store; int8_t type; /*! arbitrary user defined type */ }__attribute__((__packed__)); GU_COMPILE_ASSERT(sizeof(BufferHeader().size) >= sizeof(MemOps::size_type), buffer_header_size_check); GU_COMPILE_ASSERT((sizeof(BufferHeader) % MemOps::ALIGNMENT) == 0, buffer_header_alignment_check); /*! 
must store pointer on both 32 and 64-bit systems */ GU_COMPILE_ASSERT(sizeof(BufferHeader().ctx) >= sizeof(void*), buffer_header_ctx_check); #define BH_cast(ptr) reinterpret_cast(ptr) #define BH_const_cast(ptr) reinterpret_cast(ptr) static inline BufferHeader* ptr2BH (const void* ptr) { return (static_cast(const_cast(ptr)) - 1); } static inline void BH_clear (BufferHeader* const bh) { ::memset(bh, 0, sizeof(BufferHeader)); } static inline bool BH_is_clear (const BufferHeader* const bh) { static const uint8_t clear_bh[sizeof(BufferHeader)] = { 0, }; return (0 == ::memcmp(bh, clear_bh, sizeof(BufferHeader))); } static inline void BH_assert_clear (const BufferHeader* const bh) { assert(0 == bh->seqno_g); assert(0 == bh->size); assert(0 == bh->ctx); assert(0 == bh->flags); assert(0 == bh->store); assert(0 == bh->type); } static inline bool BH_is_released (const BufferHeader* const bh) { return (bh->flags & BUFFER_RELEASED); } static inline bool BH_is_skipped (const BufferHeader* const bh) { return (bh->flags & BUFFER_SKIPPED); } static inline MemOps* BH_ctx (const BufferHeader* const bh) { return reinterpret_cast(bh->ctx); } static inline void BH_release (BufferHeader* const bh) { assert(!BH_is_released(bh)); bh->flags |= BUFFER_RELEASED; } static inline BufferHeader* BH_next(BufferHeader* bh) { return BH_cast((reinterpret_cast(bh) + bh->size)); } static inline std::ostream& operator << (std::ostream& os, const BufferHeader* const bh) { os << "addr: " << static_cast(bh) << ", seqno: " << bh->seqno_g << ", size: " << bh->size << ", ctx: " << BH_ctx(bh) << ", flags: " << bh->flags << ". 
store: " << int(bh->store) << ", type: " << int(bh->type); return os; } /* return true if ptr may point at BufferHeader */ static inline bool BH_test(const void* const ptr) { const BufferHeader* const bh(static_cast(ptr)); if (gu_likely(!BH_is_clear(bh))) { return ( bh->seqno_g >= SEQNO_ILL && int64_t(bh->size) >= int(sizeof(BufferHeader)) && // ^^^ compare signed values for better certainty ^^^ bh->flags <= BUFFER_FLAGS_MAX && bh->store == BUFFER_IN_RB ); } return true; } } /* namespace gcache */ #endif /* __GCACHE_BUFHEAD__ */ galera-4-26.4.25/gcache/src/gcache_page_store.cpp000644 000164 177776 00000017073 15107057155 022653 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2025 Codership Oy */ /*! @file page store implementation */ #include "gcache_page_store.hpp" #include "gcache_bh.hpp" #include "gcache_limits.hpp" #include #include #include #include #include #include static const std::string base_name ("gcache.page."); static std::string make_base_name (const std::string& dir_name) { if (dir_name.empty()) { return base_name; } else { if (dir_name[dir_name.length() - 1] == '/') { return (dir_name + base_name); } else { return (dir_name + '/' + base_name); } } } static std::string make_page_name (const std::string& base_name, size_t count) { std::ostringstream os; os << base_name << std::setfill ('0') << std::setw (6) << count; return os.str(); } static void remove_file (const std::string& file_name) { if (file_name.length() > 0) { if (::remove(file_name.c_str())) { int err = errno; log_error << "Failed to remove page file '" << file_name << "': " << err << " (" << strerror(err) << ")"; } else { log_info << "Deleted page " << file_name; } } else { log_error << "Empty file name in " << __FUNCTION__; } } struct delete_thread_arg { gcache::SeqnoMap& seqno_map_; gcache::Page& page_; pthread_t previous_thread_; bool debug_; delete_thread_arg(gcache::SeqnoMap& m, gcache::Page& p, pthread_t t, bool d) : seqno_map_(m), page_ (p), previous_thread_(t), debug_ 
(d) {} ~delete_thread_arg() { delete &page_; } }; static void* discard_page(void* __restrict__ a) { delete_thread_arg* arg(static_cast(a)); auto& page(arg->page_); #ifndef NDEBUG if (arg->debug_) { log_info << "PageStore::discard_page() prev. thread: " << arg->previous_thread_ << ", page: " << page; } #endif if (arg->previous_thread_ != pthread_t(-1)) pthread_join(arg->previous_thread_, NULL); if (page.seqno_max() > 0) arg->seqno_map_.seqno_discard(page.seqno_max()); std::string const file_name(page.name()); delete arg; remove_file(file_name); pthread_exit(NULL); } /* This method does minimum work while holding global lock and then * delegates seqno2ptr map cleanup to a dedicated thread. If there is a * previously launched thread it will be joined by the new one. */ bool gcache::PageStore::delete_page () { Page* const page = pages_.front(); #ifndef NDEBUG if (debug_) { log_info << "PageStore::delete_page() " << *page; } #endif if (page->used() > 0 || page->seqno_max() >= seqno_locked_) return false; pages_.pop_front(); total_size_ -= page->size(); if (current_ == page) current_ = 0; /* While we are still holding global lock close the page and up the * low available limit to the max seqno contained in a page */ seqno_map_.set_low_limit(page->seqno_max()); page->close(); /* if there is currently another thread running it will be joined in * this new thread */ pthread_t const saved(delete_thr_); int err = pthread_create(&delete_thr_, &delete_page_attr_, discard_page, new delete_thread_arg(seqno_map_, *page, delete_thr_, debug_)); if (0 != err) { delete_thr_ = saved; gu_throw_system_error(err) << "Failed to create page deletion thread"; } return true; } /* Deleting pages only from the beginning kinda means that some free pages * can be locked in the middle for a while. Leaving it like that for * simplicity for now. 
*/ void gcache::PageStore::cleanup () { while (total_size_ > keep_size_ && pages_.size() > keep_page_ && delete_page()) {} } void gcache::PageStore::wait_page_discard() const { if (delete_thr_ != pthread_t(-1)) { pthread_join(delete_thr_, NULL); delete_thr_ = pthread_t(-1); } } void gcache::PageStore::reset () { while (pages_.size() > 0 && delete_page()) {}; } inline void gcache::PageStore::new_page (size_type size) { Page* const page(new Page(this, make_page_name(base_name_, count_), size, debug_)); pages_.push_back (page); total_size_ += page->size(); current_ = page; count_++; } gcache::PageStore::PageStore (SeqnoMap& seqno_map, const std::string& dir_name, size_t keep_size, size_t page_size, int dbg, bool keep_page) : seqno_map_ (seqno_map), base_name_ (make_base_name(dir_name)), seqno_locked_(SEQNO_MAX), keep_size_ (keep_size), page_size_ (page_size), keep_page_ (keep_page), count_ (0), pages_ (), current_ (0), total_size_(0), delete_page_attr_(), debug_ (dbg & DEBUG) , delete_thr_(pthread_t(-1)) { int err = pthread_attr_init (&delete_page_attr_); if (0 != err) { gu_throw_system_error(err) << "Failed to initialize page file " "deletion thread attributes"; } } gcache::PageStore::~PageStore () { try { while (pages_.size() && delete_page()) {}; if (delete_thr_ != pthread_t(-1)) pthread_join (delete_thr_, NULL); } catch (gu::Exception& e) { log_error << e.what() << " in ~PageStore()"; // abort() ? } if (pages_.size() > 0) { log_error << "Could not delete " << pages_.size() << " page files: some buffers are still \"mmapped\"."; if (debug_) for (PageQueue::iterator i(pages_.begin()); i != pages_.end(); ++i) { log_error << *(*i);; } } pthread_attr_destroy (&delete_page_attr_); } inline void* gcache::PageStore::malloc_new (size_type size) { Limits::assert_size(size); void* ret(NULL); try { new_page (page_size_ > size ? 
page_size_ : size); ret = current_->malloc (size); cleanup(); } catch (gu::Exception& e) { log_error << "Cannot create new cache page: " << e.what(); } return ret; } void* gcache::PageStore::malloc (size_type const size) { Limits::assert_size(size); if (gu_likely (0 != current_)) { void* ret = current_->malloc (size); if (gu_likely(0 != ret)) return ret; current_->drop_fs_cache(); } return malloc_new (size); } void* gcache::PageStore::realloc (void* ptr, size_type const size) { Limits::assert_size(size); assert(ptr != NULL); BufferHeader* const bh(ptr2BH(ptr)); Page* const page(static_cast(BH_ctx(bh))); void* ret(page->realloc(ptr, size)); if (0 != ret) return ret; ret = malloc_new (size); if (gu_likely(0 != ret)) { assert(bh->size > sizeof(BufferHeader)); size_type const ptr_size(bh->size - sizeof(BufferHeader)); memcpy (ret, ptr, size > ptr_size ? ptr_size : size); free_page_ptr (page, bh); } return ret; } void gcache::PageStore::set_debug(int const dbg) { debug_ = dbg & DEBUG; for (PageQueue::iterator i(pages_.begin()); i != pages_.end(); ++i) { (*i)->set_debug(debug_); } } galera-4-26.4.25/gcache/src/GCache.cpp000644 000164 177776 00000006241 15107057155 020336 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #include "GCache.hpp" #include "gcache_bh.hpp" #include #include #include namespace gcache { void GCache::reset() { mem.reset(); rb.reset(); ps.reset(); mallocs = 0; reallocs = 0; gid = gu::UUID(); seqno_max = SEQNO_NONE; seqno_released = SEQNO_NONE; seqno_low_ = SEQNO_NONE; seqno_locked = SEQNO_MAX; seqno_locked_count = 0; seqno2ptr.clear(SEQNO_NONE); #ifndef NDEBUG buf_tracker.clear(); #endif } GCache::GCache (ProgressCallback* pcb, gu::Config& cfg, const std::string& data_dir) : config (cfg), params (config, data_dir), mtx (), seqno2ptr (SEQNO_NONE), gid (), mem (params.mem_size(), seqno2ptr, params.debug()), rb (pcb, params.rb_name(), params.rb_size(), seqno2ptr, gid, params.debug(), params.recover()), ps (*this, 
params.dir_name(), params.keep_pages_size(), params.page_size(), params.debug(), /* keep last page if PS is the only storage */ !((params.mem_size() + params.rb_size()) > 0)), mallocs (0), reallocs (0), frees (0), seqno_max (seqno2ptr.empty() ? SEQNO_NONE : seqno2ptr.index_back()), seqno_released(seqno_max), seqno_low_ (SEQNO_NONE), seqno_locked (SEQNO_MAX), seqno_locked_count(0) #ifndef NDEBUG ,buf_tracker() ,in_dtor(false) #endif {} GCache::~GCache () { gu::Lock lock(mtx); log_debug << "\n" << "GCache mallocs : " << mallocs << "\n" << "GCache reallocs: " << reallocs << "\n" << "GCache frees : " << frees; #ifndef NDEBUG in_dtor = true; #endif } /*! prints object properties */ void print (std::ostream& os) {} } #include "gcache.h" gcache_t* gcache_create (gu_config_t* conf, const char* data_dir) { /* this funciton is used only in tests */ gcache::GCache* gc = new gcache::GCache ( NULL, *reinterpret_cast(conf), data_dir); return reinterpret_cast(gc); } void gcache_destroy (gcache_t* gc) { gcache::GCache* gcache = reinterpret_cast(gc); delete gcache; } void* gcache_malloc (gcache_t* gc, int size) { gcache::GCache* gcache = reinterpret_cast(gc); return gcache->malloc (size); } void gcache_free (gcache_t* gc, const void* ptr) { gcache::GCache* gcache = reinterpret_cast(gc); gcache->free (const_cast(ptr)); } void* gcache_realloc (gcache_t* gc, void* ptr, int size) { gcache::GCache* gcache = reinterpret_cast(gc); return gcache->realloc (ptr, size); } int64_t gcache_seqno_min (gcache_t* gc) { gcache::GCache* gcache = reinterpret_cast(gc); return gcache->seqno_min (); } galera-4-26.4.25/gcache/src/GCache.hpp000644 000164 177776 00000020230 15107057155 020335 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #ifndef __GCACHE_H__ #define __GCACHE_H__ #include "gcache_seqno.hpp" #include "gcache_mem_store.hpp" #include "gcache_rb_store.hpp" #include "gcache_page_store.hpp" #include "gcache_types.hpp" #include #include // for gu::Mutex and 
gu::Cond #include #include #include #include #ifndef NDEBUG #include #endif #include namespace gcache { class GCache : public SeqnoMap { public: static const std::string& PARAMS_DIR; static void register_params(gu::Config& cfg) { Params::register_params(cfg); } /*! * Creates a new gcache file in "gcache.name" conf parameter or * in data_dir. If file already exists, it gets overwritten. */ GCache (ProgressCallback* pcb, gu::Config& cfg, const std::string& data_dir); virtual ~GCache() noexcept(false); /*! prints object properties */ void print (std::ostream& os); /* Resets storage */ void reset(); /* Memory allocation functions */ typedef MemOps::ssize_type ssize_type; void* malloc (ssize_type size); void free (void* ptr); void* realloc (void* ptr, ssize_type size); /* Seqno related functions */ /*! * Reinitialize seqno sequence (after SST or such) * Clears seqno->ptr map // and sets seqno_min to seqno. */ void seqno_reset (const gu::GTID& gtid); /*! * Assign sequence number to buffer pointed to by ptr */ void seqno_assign (const void* ptr, seqno_t seqno_g, uint8_t type, bool skip); /*! * Mark buffer to be skipped */ void seqno_skip (const void* ptr, seqno_t seqno_g, uint8_t type); /*! * Release (free) buffers up to seqno */ void seqno_release (seqno_t seqno); /*! * Discard (forget) seqnos up to and including seqno */ void seqno_discard (const seqno_t& seqno); void set_low_limit (const seqno_t& seqno) { assert(mtx.owned() || in_dtor ); seqno_low_ = std::max(seqno_low_, seqno); } /*! * Returns smallest seqno present in history */ seqno_t seqno_min() const { gu::Lock lock(mtx); if (gu_likely(!seqno2ptr.empty()) && seqno2ptr.index_end() > seqno_low_) return std::max(seqno2ptr.index_begin(), seqno_low_ + 1); else return SEQNO_ILL; } /*! * Move lock to a given seqno. * @throws gu::NotFound if seqno is not in the cache. */ void seqno_lock (seqno_t seqno_g); /*! * Get pointer to buffer identified by seqno. * Moves lock to the given seqno and clears released flag if any. 
* The buffer will need to be "freed" again. * @throws NotFound */ const void* seqno_get_ptr (seqno_t seqno_g, ssize_t& size); class Buffer { public: Buffer() : seqno_g_(), ptr_(), size_(), skip_(), type_() { } Buffer (const Buffer& other) : seqno_g_(other.seqno_g_), ptr_ (other.ptr_), size_ (other.size_), skip_ (other.skip_), type_ (other.type_) { } Buffer& operator= (const Buffer& other) { seqno_g_ = other.seqno_g_; ptr_ = other.ptr_; size_ = other.size_; skip_ = other.skip_; type_ = other.type_; return *this; } seqno_t seqno_g() const { return seqno_g_; } const gu::byte_t* ptr() const { return ptr_; } ssize_type size() const { return size_; } bool skip() const { return skip_; } uint8_t type() const { return type_; } protected: void set_ptr (const void* p) { ptr_ = reinterpret_cast(p); } void set_other (seqno_t g, ssize_type s, bool skp, uint8_t t) { assert(s > 0); seqno_g_ = g; size_ = s; skip_ = skp, type_ = t; } private: seqno_t seqno_g_; const gu::byte_t* ptr_; ssize_type size_; bool skip_; uint8_t type_; friend class GCache; }; /*! * Fills a vector with Buffer objects starting with seqno start * until either vector length or seqno map is exhausted. * Moves seqno lock to start. * * @retval number of buffers filled (<= v.size()) */ size_t seqno_get_buffers (std::vector& v, seqno_t start); /*! * Releases any seqno locks present. */ void seqno_unlock (); /*! 
@throws NotFound */ void param_set (const std::string& key, const std::string& val); static size_t const PREAMBLE_LEN; #ifdef GCACHE_UNIT_TEST const PageStore& page_store() const { return ps; } const seqno2ptr_t& seqno_map() const { return seqno2ptr; } #endif /* GCACHE_UNIT_TEST */ private: typedef MemOps::size_type size_type; void free_common (BufferHeader*); gu::Config& config; class Params { public: static void register_params(gu::Config&); Params(gu::Config&, const std::string&); const std::string& rb_name() const { return rb_name_; } const std::string& dir_name() const { return dir_name_; } size_t mem_size() const { return mem_size_; } size_t rb_size() const { return rb_size_; } size_t page_size() const { return page_size_; } size_t keep_pages_size() const { return keep_pages_size_; } int debug() const { return debug_; } bool recover() const { return recover_; } void mem_size (size_t s) { mem_size_ = s; } void page_size (size_t s) { page_size_ = s; } void keep_pages_size (size_t s) { keep_pages_size_ = s; } #ifndef NDEBUG void debug (int d) { debug_ = d; } #endif private: std::string const rb_name_; std::string const dir_name_; size_t mem_size_; size_t const rb_size_; size_t page_size_; size_t keep_pages_size_; int debug_; bool const recover_; } params; gu::Mutex mtx; seqno2ptr_t seqno2ptr; gu::UUID gid; MemStore mem; RingBuffer rb; PageStore ps; long long mallocs; long long reallocs; long long frees; seqno_t seqno_max; seqno_t seqno_released; seqno_t seqno_low_; seqno_t seqno_locked; int seqno_locked_count; #ifndef NDEBUG std::set buf_tracker; bool in_dtor; #endif void discard_buffer (BufferHeader* bh); /* returns true when successfully discards all seqnos up to s */ bool discard_seqno (seqno_t s); /* discards all seqnos greater than s */ void discard_tail (seqno_t s); // disable copying GCache (const GCache&); GCache& operator = (const GCache&); }; } #endif /* __GCACHE_H__ */ galera-4-26.4.25/gcache/src/gcache_types.hpp000644 000164 177776 00000001042 
15107057155 021661 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2016-2021 Codership Oy */ #ifndef __GCACHE_TYPES__ #define __GCACHE_TYPES__ #include "gcache_seqno.hpp" #include "gu_deqmap.hpp" #include "gu_progress.hpp" namespace gcache { typedef int64_t progress_t; //should be sufficient for all kinds of progress typedef gu::Progress::Callback ProgressCallback; typedef gu::DeqMap seqno2ptr_t; typedef seqno2ptr_t::iterator seqno2ptr_iter_t; } /* namespace gcache */ #endif /* __GCACHE_TYPES__ */ galera-4-26.4.25/gcache/CMakeLists.txt000644 000164 177776 00000000152 15107057155 020464 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_subdirectory(src) add_subdirectory(tests) galera-4-26.4.25/gcache/AUTHORS000644 000164 177776 00000000042 15107057155 016772 0ustar00jenkinsnogroup000000 000000 Codership Oy galera-4-26.4.25/gcache/README000644 000164 177776 00000001515 15107057155 016610 0ustar00jenkinsnogroup000000 000000 GCache is a library to provide transparent on-disk memory buffer cache. The purpose is to allow (almost) arbitrarily big action cache without RAM consumption. It provides the usual malloc(), realloc(), free() calls plus: void seqno_assign(void*, int64_t) - assign GCS seqno to a buffer pointed to. int64_t seqno_lock_min() - get the lowest seqno present in cache, return its value. void* seqno_get_buf(int64_t) - get a pointer to buffer with a given seqno, unlock previously locked seqno and lock the current one. void seqno_release() - release currently locked seqno. Details will be determined during development. It exploits the fact that action buffers are allocated and discarded in order close to their TO. galera-4-26.4.25/gcache/configure.ac000644 000164 177776 00000006122 15107057155 020215 0ustar00jenkinsnogroup000000 000000 # Copyright (C) 2009 Codership Oy # -*- Autoconf -*- # Process this file with autoconf to produce a configure script. 
# AC_PREREQ(2.50) AC_INIT([libgcache], [0.1.0], [info@codership.com]) AC_CONFIG_SRCDIR([config.h.in]) AC_CANONICAL_SYSTEM AC_CONFIG_HEADER([config.h]) AM_INIT_AUTOMAKE AC_PREFIX_DEFAULT(/usr/local) # Prevent configure from guessing default CFLAGS CFLAGS="$CFLAGS" CXXFLAGS="$CXXFLAGS" # Check for debug AC_ARG_ENABLE(debug, AC_HELP_STRING([--disable-debug], [disable debugging code [[default=enabled]]]),, enable_debug="yes") if test "$enable_debug" != "no" then AM_CFLAGS="-O1 -g -fno-inline" AM_CPPFLAGS="-D_FORTIFY_SOURCE=1" else AM_CFLAGS="-O3 -g" AM_CPPFLAGS="-DNDEBUG" fi AM_CONDITIONAL(ENABLE_DEBUG, test "$enable_debug" != "no") # Checks for programs. AC_PROG_AWK AC_LANG([C++]) AC_PROG_CXX AC_REQUIRE_CPP AC_PROG_LIBTOOL AC_LANG_PUSH([C]) # AM_PATH_CHECK() is broken and doesn't #include # m4-1.4.13 can no longer handle obsolete AM_PATH_CHECK so we have to switch to # PKG_CHECK_MODULES. However CentOS-5.0 has an outdated check version, so # by checking m4 version we're trying to deduce which check macro to use. m4_define(m4_version, m4_esyscmd(m4 --version | head -n1 | cut -d \ -f 4)) m4_if(m4_version_compare(m4_version,1.4.10), 1, [PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])], [AM_PATH_CHECK()] ) AC_LANG_POP([C]) # Checks for libraries. AC_CHECK_LIB([pthread], [pthread_testcancel],, AC_MSG_ERROR([*** POSIX threads not found! ***])) AC_CHECK_LIB([galerautils], [gu_malloc_dbg],, AC_MSG_ERROR([*** galerautils not found! ***])) AC_CHECK_LIB([galerautils++], [main],, AC_MSG_ERROR([*** galerautils++ not found! ***])) # Checks for header files. AC_HEADER_STDC AC_CHECK_HEADERS([stdint.h stdlib.h string.h sys/time.h unistd.h endian.h byteswap.h]) # Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_C_INLINE AC_TYPE_SIZE_T AC_HEADER_TIME AC_STRUCT_TM AC_C_VOLATILE # Checks for library functions. 
AC_FUNC_ERROR_AT_LINE AC_FUNC_MALLOC AC_FUNC_REALLOC AC_CHECK_FUNCS([gettimeofday localtime_r memset strdup strerror strrchr strtol]) AC_CONFIG_FILES([Makefile src/Makefile]) AM_CFLAGS="$AM_CFLAGS -Wall -Werror -Wextra -pedantic -Wno-unused-parameter" AM_CXXFLAGS="$AM_CFLAGS -ansi -Weffc++ -Wold-style-cast -Wconversion" AM_CXXFLAGS="$AM_CXXFLAGS -fno-rtti -Wno-long-long" AM_CFLAGS="$AM_CFLAGS -std=c99" AM_LDFLAGS="-Wl,--warn-common -Wl,--fatal-warnings" AC_SUBST(AM_CFLAGS) AC_SUBST(AM_CXXFLAGS) AC_SUBST(AM_CPPFLAGS) AC_SUBST(AM_LDFLAGS) AC_OUTPUT AC_MSG_NOTICE([]) AC_MSG_NOTICE([ CFLAGS: $CFLAGS]) AC_MSG_NOTICE([AM_CFLAGS: $AM_CFLAGS]) AC_MSG_NOTICE([ CXXFLAGS: $CXXFLAGS]) AC_MSG_NOTICE([AM_CXXFLAGS: $AM_CXXFLAGS]) AC_MSG_NOTICE([ CPPFLAGS: $CPPFLAGS]) AC_MSG_NOTICE([AM_CPPFLAGS: $AM_CPPFLAGS]) AC_MSG_NOTICE([ LDFLAGS: $LDFLAGS]) AC_MSG_NOTICE([AM_LDFLAGS: $AM_LDFLAGS]) AC_MSG_NOTICE([ LIBS: $LIBS]) AC_MSG_NOTICE([]) galera-4-26.4.25/gcache/SConscript000644 000164 177776 00000000140 15107057155 017733 0ustar00jenkinsnogroup000000 000000 # SConscript for building galerautils SConscript(Split('''src/SConscript tests/SConscript''')) galera-4-26.4.25/gcache/bootstrap.sh000755 000164 177776 00000001250 15107057155 020300 0ustar00jenkinsnogroup000000 000000 #!/bin/sh # This script bootraps the build process for the freshly checked # working copy LOG=$0.log run_prog() { echo -n "Running $1... " $* 1>$LOG 2>&1 && echo "Ok" && rm -f $LOG || \ (echo "Failed. 
See $LOG"; return 1) } set -e # Make aclocal to search for m4 macros in /usr/local if test -d /usr/local/share/aclocal then ACLOCAL_OPTS=" -I /usr/local/share/aclocal " fi if test -x "$(which autoreconf)" then export ACLOCAL="aclocal $ACLOCAL_OPTS" run_prog autoreconf -fisv else run_prog libtoolize && \ run_prog aclocal $ACLOCAL_OPTS && \ run_prog autoheader configure.ac && \ run_prog automake -af && \ run_prog autoconf fi # galera-4-26.4.25/gcache/ChangeLog000644 000164 177776 00000000000 15107057155 017466 0ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcache/NEWS000644 000164 177776 00000000000 15107057155 016413 0ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/galerautils/000755 000164 177776 00000000000 15107057160 017024 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/galerautils/tests/000755 000164 177776 00000000000 15107057160 020166 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/galerautils/tests/gu_dbug_test.h000644 000164 177776 00000000260 15107057155 023014 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2008 Codership Oy // $Id$ #ifndef __gu_dbug_test__ #define __gu_dbug_test__ Suite *gu_dbug_suite(void); #endif /* __gu_dbug_test__ */ galera-4-26.4.25/galerautils/tests/gu_asio_test.hpp000644 000164 177776 00000000270 15107057155 023367 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2019 Codership Oy */ #ifndef GU_ASIO_TEST_HPP #define GU_ASIO_TEST_HPP #include Suite* gu_asio_suite(); #endif // GU_ASIO_TEST_HPP galera-4-26.4.25/galerautils/tests/gu_str_test.h000644 000164 177776 00000000254 15107057155 022706 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy #ifndef __gu_str_test__ #define __gu_str_test__ extern Suite *gu_str_suite(void); #endif /* __gu_str_test__ */ galera-4-26.4.25/galerautils/tests/gu_vlq_test.hpp000644 000164 177776 00000000266 15107057155 023243 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2011 Codership Oy // #ifndef GU_VLQ_TEST_HPP #define GU_VLQ_TEST_HPP 
#include Suite* gu_vlq_suite(); #endif // GU_VLQ_TEST_HPP galera-4-26.4.25/galerautils/tests/gu_string_utils_test.cpp000644 000164 177776 00000005276 15107057155 025170 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2020 Codership Oy #include "gu_string_utils.hpp" #include "gu_string_utils_test.hpp" using std::string; using std::vector; START_TEST(test_strsplit) { string str = "foo bar baz"; vector vec = gu::strsplit(str, ' '); ck_assert(vec.size() == 3); ck_assert(vec[0] == "foo"); ck_assert(vec[1] == "bar"); ck_assert(vec[2] == "baz"); } END_TEST START_TEST(test_tokenize) { vector vec = gu::tokenize("", 'a', 'b', false); ck_assert(vec.size() == 0); vec = gu::tokenize("", 'a', 'b', true); ck_assert(vec.size() == 1); ck_assert(vec[0] == ""); vec = gu::tokenize("a", 'a', 'b', false); ck_assert(vec.size() == 0); vec = gu::tokenize("a", 'a', 'b', true); ck_assert(vec.size() == 2); ck_assert(vec[0] == ""); ck_assert(vec[1] == ""); vec = gu::tokenize("foo bar baz"); ck_assert(vec.size() == 3); ck_assert(vec[0] == "foo"); ck_assert(vec[1] == "bar"); ck_assert(vec[2] == "baz"); vec = gu::tokenize("foo\\ bar baz"); ck_assert(vec.size() == 2); ck_assert_msg(vec[0] == "foo bar", "expected 'foo bar', found '%s'", vec[0].c_str()); ck_assert(vec[1] == "baz"); vec = gu::tokenize("foo\\;;bar;;baz;", ';', '\\', false); ck_assert(vec.size() == 3); ck_assert(vec[0] == "foo;"); ck_assert(vec[1] == "bar"); ck_assert(vec[2] == "baz"); vec = gu::tokenize("foo\\;;bar;;baz;", ';', '\\', true); ck_assert_msg(vec.size() == 5, "vetor length %zu, expected 5", vec.size()); ck_assert(vec[0] == "foo;"); ck_assert(vec[1] == "bar"); ck_assert(vec[2] == ""); ck_assert(vec[3] == "baz"); ck_assert(vec[4] == ""); } END_TEST START_TEST(test_trim) { string full1 = ".,wklerf joweji"; string full2 = full1; gu::trim (full2); ck_assert(full1 == full2); string part = " part "; gu::trim (part); ck_assert(part.length() == 4); ck_assert(0 == part.compare("part")); string empty; gu::trim (empty); 
ck_assert(empty.empty()); empty += ' '; empty += '\t'; empty += '\n'; empty += '\f'; ck_assert(!empty.empty()); gu::trim (empty); ck_assert_msg(empty.empty(), "string contents: '%s', expected empty", empty.c_str()); } END_TEST Suite* gu_string_utils_suite(void) { Suite* s = suite_create("String Utils"); TCase* tc; tc = tcase_create("strsplit"); tcase_add_test(tc, test_strsplit); suite_add_tcase(s, tc); tc = tcase_create("tokenize"); tcase_add_test(tc, test_tokenize); suite_add_tcase(s, tc); tc = tcase_create("trim"); tcase_add_test(tc, test_trim); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_vlq_test.cpp000644 000164 177776 00000020406 15107057155 023234 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2011-2024 Codership Oy // #include "gu_vlq.hpp" #include "gu_vlq_test.hpp" #include "gu_logger.hpp" #include "gu_inttypes.hpp" #include #include #include #include static struct valval { const unsigned long long val; const size_t size; } valarr[] = { {0x00 , 1}, {0x01 , 1}, {0x7fULL , 1}, {0x80ULL , 2}, {0x3fffULL , 2}, {0x4000ULL , 3}, {0x1fffffULL , 3}, {0x200000ULL , 4}, {0x0fffffffULL , 4}, {0x10000000ULL , 5}, {0x07ffffffffULL , 5}, {0x0800000000ULL , 6}, {0x03ffffffffffULL , 6}, {0x040000000000ULL , 7}, {0x01ffffffffffffULL , 7}, {0x02000000000000ULL , 8}, {0x00ffffffffffffffULL, 8}, {0x0100000000000000ULL, 9}, {0x7fffffffffffffffULL, 9}, {0x8000000000000000ULL, 10}, {0xffffffffffffffffULL, 10} }; // http://www.cplusplus.com/faq/sequences/arrays/sizeof-array/ template inline size_t SizeOfArray( const T(&)[ N ] ) { return N; } START_TEST(test_uleb128_size) { for (size_t i(0); i < SizeOfArray(valarr); ++i) { size_t size(gu::uleb128_size(valarr[i].val)); ck_assert_msg(size == valarr[i].size, "got size %zu, expected %zu for value 0x%llx", size, valarr[i].size, valarr[i].val); } } END_TEST START_TEST(test_uleb128_encode) { std::vector buf; for (size_t i(0); i < SizeOfArray(valarr); ++i) { buf.resize(valarr[i].size); size_t 
offset(gu::uleb128_encode(valarr[i].val, &buf[0], buf.size(), 0)); ck_assert_msg(offset == valarr[i].size, "got offset %zu, expected %zu for value 0x%llx", offset, valarr[i].size, valarr[i].val); } } END_TEST START_TEST(test_uleb128_decode) { std::vector buf; for (size_t i(0); i < SizeOfArray(valarr); ++i) { buf.resize(valarr[i].size); size_t offset(gu::uleb128_encode(valarr[i].val, &buf[0], buf.size(), 0)); unsigned long long val; try { offset = gu::uleb128_decode(&buf[0], buf.size(), 0, val); ck_assert_msg(offset == valarr[i].size, "got offset %zu, expected %zu for value 0x%llx", offset, valarr[i].size, valarr[i].val); ck_assert_msg(val == valarr[i].val, "got value 0x%llx, expected 0x%llx", val, valarr[i].val); } catch (gu::Exception& e) { ck_abort_msg("Exception in round %zu for encoding of size %zu: %s", i, valarr[i].size, e.what()); } } } END_TEST START_TEST(test_uleb128_misc) { std::vector buf(10); // check uint8_t whole range for (size_t i(0); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint8_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) ck_abort_msg("0x%zx != 0x%x", i, val); } // check uint16_t whole range for (size_t i(0); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint16_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) ck_abort_msg("0x%zx != 0x%x", i, val); } // check uint32_t: 0 -> 1^20 for (size_t i(0); i < (1 << 20); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint32_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); if (i != val) ck_abort_msg("0x%zx != 0x%x", i, val); } // check uin32_t: max - 1^20 -> max for (uint64_t i(std::numeric_limits::max() - (1 << 20)); i <= std::numeric_limits::max(); ++i) { (void)gu::uleb128_encode(static_cast(i), &buf[0], buf.size(), 0); uint32_t val; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val); 
if (i != val) ck_abort_msg("0x%" PRIX64 " != 0x%" PRIX32, i, val); } // uint64_t is tested for representation byte boundaries earlier, // run test just for random values for (size_t i(0); i < (1 << 16); ++i) { unsigned long long val(static_cast(rand()) * static_cast(rand())); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); unsigned long long val2; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, val2); if (val != val2) ck_abort_msg("0x%llx != 0x%llx", val, val2); } { // check that exception is thrown if target type is not // wide enough // uint8_t uint64_t val(static_cast(std::numeric_limits::max()) + 1); buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint8_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); ck_abort_msg("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // uint16_t val = static_cast(std::numeric_limits::max()) + 1; buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint16_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); ck_abort_msg("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // uint32_t val = static_cast(std::numeric_limits::max()) + 1; buf.resize(gu::uleb128_size(val)); (void)gu::uleb128_encode(val, &buf[0], buf.size(), 0); try { uint32_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); ck_abort_msg("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // check that exception is thrown if terminating byte is missing buf.resize(buf.size() - 1); try { uint64_t cval; (void)gu::uleb128_decode(&buf[0], buf.size(), 0, cval); ck_abort_msg("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } // finally check the representation that cannot be stored with // uint64_t gu::byte_t b[] = {0x80, 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, // <--- up here 9 * 7 = 63 bits 0x02}; // <--- requires two additional bits try { uint64_t cval; (void)gu::uleb128_decode(b, SizeOfArray(b), 0, cval); ck_abort_msg("exception was not thrown"); } catch (gu::Exception& e) { log_info << "expected exception: " << e.what(); } } } END_TEST Suite* gu_vlq_suite() { Suite* s(suite_create("gu::vlq")); TCase* tc; tc = tcase_create("test_uleb128_size"); tcase_add_test(tc, test_uleb128_size); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_encode"); tcase_add_test(tc, test_uleb128_encode); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_decode"); tcase_add_test(tc, test_uleb128_decode); suite_add_tcase(s, tc); tc = tcase_create("test_uleb128_misc"); tcase_add_test(tc, test_uleb128_misc); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_utils_test++.cpp000644 000164 177776 00000002626 15107057155 023724 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "gu_utils.hpp" #include "gu_utils_test++.hpp" static void assert_invalid_int(const std::string& test_str, std::ios_base& (*f)(std::ios_base&) = std::dec) { bool exception(false); try { gu::from_string(test_str); } catch (gu::NotFound) { exception = true; } ck_assert(exception); } START_TEST(test_from_string_invalid_int) { // used to parse '1' assert_invalid_int("1dummy"); assert_invalid_int("1 dummy"); assert_invalid_int("0x1whatever", std::hex); // used to parse 'd' assert_invalid_int("dummy", std::hex); } END_TEST static void assert_invalid_bool(const std::string& test_str) { bool exception(false); try { gu::from_string(test_str); } catch (gu::NotFound) { exception = true; } ck_assert(exception); } START_TEST(test_from_string_invalid_bool) { assert_invalid_bool("true 1"); } END_TEST Suite* gu_utils_cpp_suite() { Suite* s = suite_create("gu::utils"); TCase* tc; tc = tcase_create("test_from_string_invalid_int"); tcase_add_test(tc, 
test_from_string_invalid_int); suite_add_tcase(s, tc); tc = tcase_create("test_from_string_invalid_bool"); tcase_add_test(tc, test_from_string_invalid_bool); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_fnv_test.c000644 000164 177776 00000003472 15107057155 022667 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy // $Id$ #include "gu_fnv_test.h" #include #include static const char* const test_buf = "chongo /\\../\\"; // enable normal FNV mode for reference hash checking #define GU_FNV_NORMAL #include "../src/gu_fnv.h" START_TEST (gu_fnv32_test) { uint32_t ret = 0; gu_fnv32a_internal (test_buf, strlen(test_buf), &ret); ck_assert_msg(GU_FNV32_SEED == ret, "FNV32 failed: expected %lu, got %"PRIu32, GU_FNV32_SEED, ret); } END_TEST START_TEST (gu_fnv64_test) { uint64_t ret = 0; gu_fnv64a_internal (test_buf, strlen(test_buf), &ret); ck_assert_msg(GU_FNV64_SEED == ret, "FNV64 failed: expected %llu, got %"PRIu64, GU_FNV64_SEED, ret); } END_TEST START_TEST (gu_fnv128_test) { gu_uint128_t GU_SET128(ret, 0, 0); gu_fnv128a_internal (test_buf, strlen(test_buf), &ret); #if defined(__SIZEOF_INT128__) ck_assert_msg(GU_EQ128(GU_FNV128_SEED, ret), "FNV128 failed: expected %"PRIx64" %"PRIx64", got %"PRIx64" %"PRIx64, (uint64_t)(GU_FNV128_SEED >> 64), (uint64_t)GU_FNV128_SEED, (uint64_t)(ret >> 64), (uint64_t)ret); #else ck_assert_msg(GU_EQ128(GU_FNV128_SEED, ret), "FNV128 failed: expected %"PRIx64" %"PRIx64", got %"PRIx64" %"PRIx64, GU_FNV128_SEED.u64[GU_64HI], GU_FNV128_SEED.u64[GU_64LO], ret.u64[GU_64HI], ret.u64[GU_64LO]); #endif } END_TEST Suite *gu_fnv_suite(void) { Suite *s = suite_create("FNV hash"); TCase *tc_fnv = tcase_create("gu_fnv"); suite_add_tcase (s, tc_fnv); tcase_add_test(tc_fnv, gu_fnv32_test); tcase_add_test(tc_fnv, gu_fnv64_test); tcase_add_test(tc_fnv, gu_fnv128_test); return s; } galera-4-26.4.25/galerautils/tests/gu_time_test.h000644 000164 177776 00000000260 15107057155 023031 0ustar00jenkinsnogroup000000 
000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_time_test__ #define __gu_time_test__ Suite *gu_time_suite(void); #endif /* __gu_time_test__ */ galera-4-26.4.25/galerautils/tests/gu_alloc_test.hpp000644 000164 177776 00000000317 15107057155 023530 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_alloc_test__ #define __gu_alloc_test__ #include extern Suite *gu_alloc_suite(void); #endif /* __gu_alloc_test__ */ galera-4-26.4.25/galerautils/tests/gu_alloc_test.cpp000644 000164 177776 00000004366 15107057155 023533 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2020 Codership Oy // $Id$ #include "../src/gu_alloc.hpp" #include "gu_alloc_test.hpp" class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; START_TEST (basic) { ssize_t const extra_size(1 << 12); /* extra size to force new page */ size_t reserved[extra_size / sizeof(size_t)]; /* size_t for alignment */ const char test0[] = "test0"; ssize_t const test0_size(sizeof(test0)); const char test1[] = "test1"; ssize_t const test1_size(sizeof(test1) + extra_size); TestBaseName test_name("gu_alloc_test"); gu::Allocator a(test_name, reserved, sizeof(reserved), sizeof(test1), 1 << 16); mark_point(); void* p; size_t r, s = 0; bool n; r = 0; s += r; mark_point(); p = a.alloc(r, n); ck_assert(0 == p); ck_assert(!n); ck_assert(a.size() == s); r = test0_size; s += r; mark_point(); p = a.alloc(r, n); ck_assert(0 != p); ck_assert(!n); ck_assert(a.size() == s); strcpy (reinterpret_cast(p), test0); r = test1_size; s += r; mark_point(); p = a.alloc(r, n); ck_assert(0 != p); ck_assert(n); /* new page must be allocated */ ck_assert(a.size() == s); strcpy (reinterpret_cast(p), test1); r = 0; s += r; mark_point(); p = a.alloc(r, n); ck_assert(0 == p); ck_assert(!n); ck_assert(a.size() == s); #ifdef GU_ALLOCATOR_DEBUG std::vector out; out.reserve 
(a.count()); mark_point(); size_t out_size = a.gather (out); ck_assert(out_size == test0_size + test1_size); ck_assert(out.size() == 2); ck_assert(out[0].size == test0_size); ck_assert(!strcmp(reinterpret_cast(out[0].ptr), test0)); ck_assert(out[1].size == test1_size); ck_assert(!strcmp(reinterpret_cast(out[1].ptr), test1)); #endif /* GU_ALLOCATOR_DEBUG */ } END_TEST Suite* gu_alloc_suite () { TCase* t = tcase_create ("Allocator"); tcase_add_test (t, basic); Suite* s = suite_create ("gu::Allocator"); suite_add_tcase (s, t); return s; } galera-4-26.4.25/galerautils/tests/gu_string_test.cpp000644 000164 177776 00000005453 15107057155 023745 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2020 Codership Oy * * $Id$ */ #include "../src/gu_string.hpp" #include "gu_string_test.hpp" START_TEST (ctor_test) { gu::String<8> str1; // default ck_assert(str1.size() == 0); ck_assert(strlen(str1.c_str()) == 0); const char* const test_string1("test"); gu::String<8> str2(test_string1); // from char* ck_assert(str2.size() == strlen(test_string1)); ck_assert(::strcmp(str2.c_str(), test_string1) == 0); gu::String<2> str3(str2); // copy ctor ck_assert(str3.size() == str2.size()); ck_assert(::strcmp(str2.c_str(), str3.c_str()) == 0); std::string const std_string(str3.c_str()); gu::String<4> str4(std_string); // from std::string ck_assert(str4.size() == strlen(test_string1)); ck_assert(::strcmp(str4.c_str(), test_string1) == 0); gu::String<5> str5(test_string1, 2); ck_assert(str5.size() == 2); ck_assert(::strncmp(str5.c_str(), test_string1, 2) == 0); } END_TEST START_TEST (func_test) { gu::String<16> str; ck_assert(str.size() == 0); ck_assert(strlen(str.c_str()) == 0); const char* const buf_ptr(str.c_str()); str = "one"; str << std::string("two") << gu::String<8>("three"); ck_assert(::strcmp(str.c_str(), "onetwothree") == 0); ck_assert(str.c_str() == buf_ptr); str += "blast!"; // this should spill to heap ck_assert_msg(::strcmp(str.c_str(), "onetwothreeblast!") == 0, "expected 
'onetwothreeblast!' got '%s'", str.c_str()); ck_assert(str.c_str() != buf_ptr); str = gu::String<2>("back to stack"); ck_assert(str == "back to stack"); ck_assert(str == gu::String<>("back to stack")); ck_assert(str == std::string("back to stack")); ck_assert(str.c_str() == buf_ptr); typedef void* pointer; // conversions ck_assert((gu::String<>() << true) == "true"); ck_assert((gu::String<>() << 0.0123) == "0.012300"); if (sizeof(pointer) == 4) ck_assert((gu::String<>() << pointer(0xdeadbeef))=="0xdeadbeef"); else ck_assert((gu::String<>() << pointer(0xdeadbeef))=="0x00000000deadbeef"); ck_assert((gu::String<>() << 1234567890) == "1234567890"); ck_assert((gu::String<>() << 12345U) == "12345"); ck_assert((gu::String<>() << 'a') == "a"); ck_assert((gu::String<>() << 0xdeadbeef) == "3735928559"); ck_assert((gu::String<>() << gu::Fmt("%010x") << 0xdeadbeef) =="00deadbeef"); } END_TEST Suite* gu_string_suite(void) { Suite* s = suite_create ("gu::String"); TCase* t = tcase_create ("ctor_test"); tcase_add_test (t, ctor_test); suite_add_tcase (s, t); t = tcase_create ("func_test"); tcase_add_test (t, func_test); suite_add_tcase (s, t); return s; } galera-4-26.4.25/galerautils/tests/gu_uuid_test.h000644 000164 177776 00000000300 15107057155 023034 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gu_uuid_test__ #define __gu_uuid_test__ extern Suite *gu_uuid_suite(void); #endif /* __gu_uuid_test__ */ galera-4-26.4.25/galerautils/tests/gu_fifo_test.c000644 000164 177776 00000014451 15107057155 023020 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2024 Codership Oy // $Id$ #include #include "gu_fifo_test.h" #include "../src/galerautils.h" #define FIFO_LENGTH 10000L START_TEST (gu_fifo_test) { gu_fifo_t* fifo; long i; long* item; long used; fifo = gu_fifo_create (0, 1); ck_assert(fifo == NULL); fifo = gu_fifo_create (1, 0); ck_assert(fifo == NULL); fifo = gu_fifo_create (1, 1); ck_assert(fifo != NULL); gu_fifo_close 
(fifo); mark_point(); gu_fifo_destroy (fifo); mark_point(); fifo = gu_fifo_create (FIFO_LENGTH, sizeof(i)); ck_assert(fifo != NULL); ck_assert_msg(gu_fifo_length(fifo) == 0, "fifo->used is %ld for an empty FIFO", gu_fifo_length(fifo)); mark_point(); gu_fifo_clear(fifo); // clear empty fifo ck_assert_msg(gu_fifo_length(fifo) == 0, "fifo->used is %ld for a cleared FIFO", gu_fifo_length(fifo)); // fill FIFO for (i = 0; i < FIFO_LENGTH; i++) { item = gu_fifo_get_tail (fifo); ck_assert_msg(item != NULL, "could not get item %ld", i); *item = i; gu_fifo_push_tail (fifo); } used = i; ck_assert_msg(gu_fifo_length(fifo) == used, "used is %ld, expected %ld", used, gu_fifo_length(fifo)); mark_point(); gu_fifo_clear(fifo); // clear filled fifo ck_assert_msg(gu_fifo_length(fifo) == 0, "fifo->used is %ld for a cleared FIFO", gu_fifo_length(fifo)); // fill FIFO again for (i = 0; i < FIFO_LENGTH; i++) { item = gu_fifo_get_tail (fifo); ck_assert_msg(item != NULL, "could not get item %ld", i); *item = i; gu_fifo_push_tail (fifo); } used = i; ck_assert_msg(gu_fifo_length(fifo) == used, "used is %ld, expected %zu", used, gu_fifo_length(fifo)); // test pop for (i = 0; i < used; i++) { int err; item = gu_fifo_get_head (fifo, &err); ck_assert_msg(item != NULL, "could not get item %ld", i); ck_assert_msg(*item == i, "got %ld, expected %ld", *item, i); gu_fifo_pop_head (fifo); } ck_assert_msg(gu_fifo_length(fifo) == 0, "gu_fifo_length() for empty queue is %ld", gu_fifo_length(fifo)); gu_fifo_close (fifo); int err; item = gu_fifo_get_head (fifo, &err); ck_assert(item == NULL); ck_assert(err == -ENODATA); gu_fifo_destroy (fifo); } END_TEST static gu_mutex_t sync_mtx = GU_MUTEX_INITIALIZER; static gu_cond_t sync_cond = GU_COND_INITIALIZER; #define ITEM 12345 static void* cancel_thread (void* arg) { gu_fifo_t* q = arg; /* sync with parent */ gu_mutex_lock (&sync_mtx); gu_cond_signal (&sync_cond); gu_mutex_unlock (&sync_mtx); size_t* item; int err; /* try to get from non-empty queue */ item = 
gu_fifo_get_head (q, &err); ck_assert_msg(NULL == item, "Got item %p: %zu", item, item ? *item : 0); ck_assert(-ECANCELED == err); /* signal end of the first gu_fifo_get_head() */ gu_mutex_lock (&sync_mtx); gu_cond_signal (&sync_cond); /* wait until gets are resumed */ gu_cond_wait (&sync_cond, &sync_mtx); item = gu_fifo_get_head (q, &err); ck_assert(NULL != item); ck_assert(ITEM == *item); gu_fifo_pop_head (q); /* signal end of the 2nd gu_fifo_get_head() */ gu_cond_signal (&sync_cond); gu_mutex_unlock (&sync_mtx); /* try to get from empty queue (should block) */ item = gu_fifo_get_head (q, &err); ck_assert(NULL == item); ck_assert(-ECANCELED == err); /* signal end of the 3rd gu_fifo_get_head() */ gu_mutex_lock (&sync_mtx); gu_cond_signal (&sync_cond); /* wait until fifo is closed */ gu_cond_wait (&sync_cond, &sync_mtx); item = gu_fifo_get_head (q, &err); ck_assert(NULL == item); ck_assert(-ECANCELED == err); /* signal end of the 4th gu_fifo_get_head() */ gu_cond_signal (&sync_cond); /* wait until fifo is resumed */ gu_cond_wait (&sync_cond, &sync_mtx); gu_mutex_unlock (&sync_mtx); item = gu_fifo_get_head (q, &err); ck_assert(NULL == item); ck_assert(-ENODATA == err); return NULL; } START_TEST(gu_fifo_cancel_test) { gu_fifo_t* q = gu_fifo_create (FIFO_LENGTH, sizeof(size_t)); size_t* item = gu_fifo_get_tail (q); ck_assert(item != NULL); *item = ITEM; gu_fifo_push_tail (q); gu_mutex_lock (&sync_mtx); gu_thread_t thread; gu_thread_create (&thread, NULL, cancel_thread, q); /* sync with child thread */ gu_fifo_lock (q); gu_cond_wait (&sync_cond, &sync_mtx); int err; err = gu_fifo_cancel_gets (q); ck_assert(0 == err); err = gu_fifo_cancel_gets (q); ck_assert(-EBADFD == err); /* allow the first gu_fifo_get_head() */ gu_fifo_release (q); mark_point(); /* wait for the first gu_fifo_get_head() to complete */ gu_cond_wait (&sync_cond, &sync_mtx); mark_point(); err = gu_fifo_resume_gets (q); ck_assert(0 == err); err = gu_fifo_resume_gets (q); ck_assert(-EBADFD == err); /* 
signal that now gets are resumed */ gu_cond_signal (&sync_cond); /* wait for the 2nd gu_fifo_get_head() to complete */ gu_cond_wait (&sync_cond, &sync_mtx); /* wait a bit to make sure 3rd gu_fifo_get_head() is blocked * (even if it is not - still should work)*/ usleep (100000 /* 0.1s */); gu_fifo_lock(q); err = gu_fifo_cancel_gets (q); gu_fifo_release(q); ck_assert(0 == err); /* wait for the 3rd gu_fifo_get_head() to complete */ gu_cond_wait (&sync_cond, &sync_mtx); gu_fifo_close (q); // closes for puts, but the q still must be canceled gu_cond_signal (&sync_cond); /* wait for the 4th gu_fifo_get_head() to complete */ gu_cond_wait (&sync_cond, &sync_mtx); gu_fifo_resume_gets (q); // resumes gets gu_cond_signal (&sync_cond); gu_mutex_unlock (&sync_mtx); mark_point(); gu_thread_join(thread, NULL); gu_fifo_destroy(q); } END_TEST Suite *gu_fifo_suite(void) { Suite *s = suite_create("Galera FIFO functions"); TCase *tc = tcase_create("gu_fifo"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_fifo_test); tcase_add_test (tc, gu_fifo_cancel_test); tcase_set_timeout(tc, 60); return s; } galera-4-26.4.25/galerautils/tests/gu_mem_test.h000644 000164 177776 00000000263 15107057155 022654 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_mem_test__ #define __gu_mem_test__ extern Suite *gu_mem_suite(void); #endif /* __gu_mem_test__ */ galera-4-26.4.25/galerautils/tests/gu_tests.c000644 000164 177776 00000003562 15107057155 022201 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2017 Codership Oy // $Id$ #include // printf() #include // strcmp() #include // EXIT_SUCCESS | EXIT_FAILURE #include // unlink() #include #include "../src/gu_conf.h" #include "gu_mem_test.h" #include "gu_bswap_test.h" #include "gu_fnv_test.h" #include "gu_mmh3_test.h" #include "gu_spooky_test.h" #include "gu_crc32c_test.h" #include "gu_hash_test.h" #include "gu_dbug_test.h" #include "gu_time_test.h" #include "gu_fifo_test.h" #include "gu_uuid_test.h" 
#include "gu_lock_step_test.h" #include "gu_str_test.h" #include "gu_utils_test.h" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gu_mem_suite, gu_bswap_suite, gu_fnv_suite, gu_mmh3_suite, gu_spooky_suite, gu_crc32c_suite, gu_hash_suite, gu_dbug_suite, gu_time_suite, gu_fifo_suite, gu_uuid_suite, gu_lock_step_suite, gu_str_suite, gu_utils_suite, NULL }; #define LOG_FILE "gu_tests.log" int main(int argc, char* argv[]) { int no_fork = ((argc > 1) && !strcmp(argv[1], "nofork")) ? 1 : 0; int i = 0; int failed = 0; FILE* log_file = NULL; if (!no_fork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); while (suites[i]) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all (sr, CK_NORMAL); failed += srunner_ntests_failed (sr); srunner_free (sr); i++; } if (log_file) { fclose (log_file); } if (0 == failed && NULL != log_file) unlink(LOG_FILE); printf ("Total tests failed: %d\n", failed); return (failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-4-26.4.25/galerautils/tests/gu_rset_test.hpp000644 000164 177776 00000000312 15107057155 023406 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_rset_test__ #define __gu_rset_test__ #include Suite *gu_rset_suite(void); #endif /* __gu_rset_test__ */ galera-4-26.4.25/galerautils/tests/gu_histogram_test.cpp000644 000164 177776 00000001341 15107057155 024424 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #include "../src/gu_histogram.hpp" #include "../src/gu_logger.hpp" #include #include "gu_histogram_test.hpp" using namespace gu; START_TEST(test_histogram) { Histogram hs("0.0,0.0005,0.001,0.002,0.005,0.01,0.02,0.05,0.1,0.5,1.,5."); hs.insert(0.001); log_info << hs; for (size_t i = 0; i < 1000; ++i) { hs.insert(double(::rand())/RAND_MAX); } log_info << hs; hs.clear(); log_info << hs; } END_TEST Suite* gu_histogram_suite() { TCase* t = tcase_create ("test_histogram"); tcase_add_test (t, test_histogram); Suite* s = suite_create ("gu::Histogram"); suite_add_tcase (s, t); return s; } galera-4-26.4.25/galerautils/tests/gu_spooky_test.c000644 000164 177776 00000017521 15107057155 023422 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2024 Codership Oy /*! 
* Original Bob Jenkins' test implementation: * http://www.burtleburtle.net/bob/c/testspooky.cpp * * $Id$ */ #include "gu_spooky_test.h" #include "../src/gu_spooky.h" #include "../src/gu_hexdump.h" #include #define BUFSIZE 512 static uint64_t const expected[BUFSIZE] = { 0xa24295ec, 0xfe3a05ce, 0x257fd8ef, 0x3acd5217, 0xfdccf85c, 0xc7b5f143, 0x3b0c3ff0, 0x5220f13c, 0xa6426724, 0x4d5426b4, 0x43e76b26, 0x051bc437, 0xd8f28a02, 0x23ccc30e, 0x811d1a2d, 0x039128d4, 0x9cd96a73, 0x216e6a8d, 0x97293fe8, 0xe4fc6d09, 0x1ad34423, 0x9722d7e4, 0x5a6fdeca, 0x3c94a7e1, 0x81a9a876, 0xae3f7c0e, 0x624b50ee, 0x875e5771, 0x0095ab74, 0x1a7333fb, 0x056a4221, 0xa38351fa, 0x73f575f1, 0x8fded05b, 0x9097138f, 0xbd74620c, 0x62d3f5f2, 0x07b78bd0, 0xbafdd81e, 0x0638f2ff, 0x1f6e3aeb, 0xa7786473, 0x71700e1d, 0x6b4625ab, 0xf02867e1, 0xb2b2408f, 0x9ce21ce5, 0xa62baaaf, 0x26720461, 0x434813ee, 0x33bc0f14, 0xaaab098a, 0x750af488, 0xc31bf476, 0x9cecbf26, 0x94793cf3, 0xe1a27584, 0xe80c4880, 0x1299f748, 0x25e55ed2, 0x405e3feb, 0x109e2412, 0x3e55f94f, 0x59575864, 0x365c869d, 0xc9852e6a, 0x12c30c62, 0x47f5b286, 0xb47e488d, 0xa6667571, 0x78220d67, 0xa49e30b9, 0x2005ef88, 0xf6d3816d, 0x6926834b, 0xe6116805, 0x694777aa, 0x464af25b, 0x0e0e2d27, 0x0ea92eae, 0x602c2ca9, 0x1d1d79c5, 0x6364f280, 0x939ee1a4, 0x3b851bd8, 0x5bb6f19f, 0x80b9ed54, 0x3496a9f1, 0xdf815033, 0x91612339, 0x14c516d6, 0xa3f0a804, 0x5e78e975, 0xf408bcd9, 0x63d525ed, 0xa1e459c3, 0xfde303af, 0x049fc17f, 0xe7ed4489, 0xfaeefdb6, 0x2b1b2fa8, 0xc67579a6, 0x5505882e, 0xe3e1c7cb, 0xed53bf30, 0x9e628351, 0x8fa12113, 0x7500c30f, 0xde1bee00, 0xf1fefe06, 0xdc759c00, 0x4c75e5ab, 0xf889b069, 0x695bf8ae, 0x47d6600f, 0xd2a84f87, 0xa0ca82a9, 0x8d2b750c, 0xe03d8cd7, 0x581fea33, 0x969b0460, 0x36c7b7de, 0x74b3fd20, 0x2bb8bde6, 0x13b20dec, 0xa2dcee89, 0xca36229d, 0x06fdb74e, 0x6d9a982d, 0x02503496, 0xbdb4e0d9, 0xbd1f94cf, 0x6d26f82d, 0xcf5e41cd, 0x88b67b65, 0x3e1b3ee4, 0xb20e5e53, 0x1d9be438, 0xcef9c692, 0x299bd1b2, 0xb1279627, 0x210b5f3d, 0x5569bd88, 0x9652ed43, 
0x7e8e0f8c, 0xdfa01085, 0xcd6d6343, 0xb8739826, 0xa52ce9a0, 0xd33ef231, 0x1b4d92c2, 0xabfa116d, 0xcdf47800, 0x3a4eefdc, 0xd01f3bcf, 0x30a32f46, 0xfb54d851, 0x06a98f67, 0xbdcd0a71, 0x21a00949, 0xfe7049c9, 0x67ef46d2, 0xa1fabcbc, 0xa4c72db4, 0x4a8a910d, 0x85a890ad, 0xc37e9454, 0xfc3d034a, 0x6f46cc52, 0x742be7a8, 0xe94ecbc5, 0x5f993659, 0x98270309, 0x8d1adae9, 0xea6e035e, 0x293d5fae, 0x669955b3, 0x5afe23b5, 0x4c74efbf, 0x98106505, 0xfbe09627, 0x3c00e8df, 0x5b03975d, 0x78edc83c, 0x117c49c6, 0x66cdfc73, 0xfa55c94f, 0x5bf285fe, 0x2db49b7d, 0xfbfeb8f0, 0xb7631bab, 0x837849f3, 0xf77f3ae5, 0x6e5db9bc, 0xfdd76f15, 0x545abf92, 0x8b538102, 0xdd5c9b65, 0xa5adfd55, 0xecbd7bc5, 0x9f99ebdd, 0x67500dcb, 0xf5246d1f, 0x2b0c061c, 0x927a3747, 0xc77ba267, 0x6da9f855, 0x6240d41a, 0xe9d1701d, 0xc69f0c55, 0x2c2c37cf, 0x12d82191, 0x47be40d3, 0x165b35cd, 0xb7db42e1, 0x358786e4, 0x84b8fc4e, 0x92f57c28, 0xf9c8bbd7, 0xab95a33d, 0x11009238, 0xe9770420, 0xd6967e2a, 0x97c1589f, 0x2ee7e7d3, 0x32cc86da, 0xe47767d1, 0x73e9b61e, 0xd35bac45, 0x835a62bb, 0x5d9217b0, 0x43f3f0ed, 0x8a97911e, 0x4ec7eb55, 0x4b5a988c, 0xb9056683, 0x45456f97, 0x1669fe44, 0xafb861b8, 0x8e83a19c, 0x0bab08d6, 0xe6a145a9, 0xc31e5fc2, 0x27621f4c, 0x795692fa, 0xb5e33ab9, 0x1bc786b6, 0x45d1c106, 0x986531c9, 0x40c9a0ec, 0xff0fdf84, 0xa7359a42, 0xfd1c2091, 0xf73463d4, 0x51b0d635, 0x1d602fb4, 0xc56b69b7, 0x6909d3f7, 0xa04d68f4, 0x8d1001a7, 0x8ecace50, 0x21ec4765, 0x3530f6b0, 0x645f3644, 0x9963ef1e, 0x2b3c70d5, 0xa20c823b, 0x8d26dcae, 0x05214e0c, 0x1993896d, 0x62085a35, 0x7b620b67, 0x1dd85da2, 0x09ce9b1d, 0xd7873326, 0x063ff730, 0xf4ff3c14, 0x09a49d69, 0x532062ba, 0x03ba7729, 0xbd9a86cc, 0xe26d02a7, 0x7ccbe5d3, 0x4f662214, 0x8b999a66, 0x3d0b92b4, 0x70b210f0, 0xf5b8f16f, 0x32146d34, 0x430b92bf, 0x8ab6204c, 0x35e6e1ff, 0xc2f6c2fa, 0xa2df8a1a, 0x887413ec, 0x7cb7a69f, 0x7ac6dbe6, 0x9102d1cb, 0x8892a590, 0xc804fe3a, 0xdfc4920a, 0xfc829840, 0x8910d2eb, 0x38a210fd, 0x9d840cc9, 0x7b9c827f, 0x3444ca0c, 0x071735ab, 0x5e9088e4, 0xc995d60e, 
0xbe0bb942, 0x17b089ae, 0x050e1054, 0xcf4324f7, 0x1e3e64dd, 0x436414bb, 0xc48fc2e3, 0x6b6b83d4, 0x9f6558ac, 0x781b22c5, 0x7147cfe2, 0x3c221b4d, 0xa5602765, 0x8f01a4f0, 0x2a9f14ae, 0x12158cb8, 0x28177c50, 0x1091a165, 0x39e4e4be, 0x3e451b7a, 0xd965419c, 0x52053005, 0x0798aa53, 0xe6773e13, 0x1207f671, 0xd2ef998b, 0xab88a38f, 0xc77a8482, 0xa88fb031, 0x5199e0cd, 0x01b30536, 0x46eeb0ef, 0x814259ff, 0x9789a8cf, 0x376ec5ac, 0x7087034a, 0x948b6bdd, 0x4281e628, 0x2c848370, 0xd76ce66a, 0xe9b6959e, 0x24321a8e, 0xdeddd622, 0xb890f960, 0xea26c00a, 0x55e7d8b2, 0xeab67f09, 0x9227fb08, 0xeebbed06, 0xcac1b0d1, 0xb6412083, 0x05d2b0e7, 0x9037624a, 0xc9702198, 0x2c8d1a86, 0x3e7d416e, 0xc3f1a39f, 0xf04bdce4, 0xc88cdb61, 0xbdc89587, 0x4d29b63b, 0x6f24c267, 0x4b529c87, 0x573f5a53, 0xdb3316e9, 0x288eb53b, 0xd2c074bd, 0xef44a99a, 0x2b404d2d, 0xf6706464, 0xfe824f4c, 0xc3debaf8, 0x12f44f98, 0x03135e76, 0xb4888e7f, 0xb6b2325d, 0x3a138259, 0x513c83ec, 0x2386d214, 0x94555500, 0xfbd1522d, 0xda2af018, 0x15b054c0, 0x5ad654e6, 0xb6ed00aa, 0xa2f2180e, 0x5f662825, 0xecd11366, 0x1de5e99d, 0x07afd2ad, 0xcf457b04, 0xe631e10b, 0x83ae8a21, 0x709f0d59, 0x3e278bf9, 0x246816db, 0x9f5e8fd3, 0xc5b5b5a2, 0xd54a9d5c, 0x4b6f2856, 0x2eb5a666, 0xfc68bdd4, 0x1ed1a7f8, 0x98a34b75, 0xc895ada9, 0x2907cc69, 0x87b0b455, 0xddaf96d9, 0xe7da15a6, 0x9298c82a, 0x72bd5cab, 0x2e2a6ad4, 0x7f4b6bb8, 0x525225fe, 0x985abe90, 0xac1fd6e1, 0xb8340f23, 0x92985159, 0x7d29501d, 0xe75dc744, 0x687501b4, 0x92077dc3, 0x58281a67, 0xe7e8e9be, 0xd0e64fd1, 0xb2eb0a30, 0x0e1feccd, 0xc0dc4a9e, 0x5c4aeace, 0x2ca5b93c, 0xee0ec34f, 0xad78467b, 0x0830e76e, 0x0df63f8b, 0x2c2dfd95, 0x9b41ed31, 0x9ff4cddc, 0x1590c412, 0x2366fc82, 0x7a83294f, 0x9336c4de, 0x2343823c, 0x5b681096, 0xf320e4c2, 0xc22b70e2, 0xb5fbfb2a, 0x3ebc2fed, 0x11af07bd, 0x429a08c5, 0x42bee387, 0x58629e33, 0xfb63b486, 0x52135fbe, 0xf1380e60, 0x6355de87, 0x2f0bb19a, 0x167f63ac, 0x507224cf, 0xf7c99d00, 0x71646f50, 0x74feb1ca, 0x5f9abfdd, 0x278f7d68, 0x70120cd7, 0x4281b0f2, 0xdc8ebe5c, 
0x36c32163, 0x2da1e884, 0x61877598, 0xbef04402, 0x304db695, 0xfa8e9add, 0x503bac31, 0x0fe04722, 0xf0d59f47, 0xcdc5c595, 0x918c39dd, 0x0cad8d05, 0x6b3ed1eb, 0x4d43e089, 0x7ab051f8, 0xdeec371f, 0x0f4816ae, 0xf8a1a240, 0xd15317f6, 0xb8efbf0b, 0xcdd05df8, 0x4fd5633e, 0x7cf19668, 0x25d8f422, 0x72d156f2, 0x2a778502, 0xda7aefb9, 0x4f4f66e8, 0x19db6bff, 0x74e468da, 0xa754f358, 0x7339ec50, 0x139006f6, 0xefbd0b91, 0x217e9a73, 0x939bd79c }; START_TEST (gu_spooky_test) { uint8_t buf[BUFSIZE]; size_t i; for (i = 0; i < BUFSIZE; ++i) { uint32_t res; buf[i] = i+128; /* It looks like values for messages under bufSize are for the "short" * algorithm, incompatible with the real one. */ if (i < _spooky_bufSize) { /* using 128-bit version */ uint64_t h[2]; gu_spooky_short (buf, i, h); res = (uint32_t)gu_le64(h[0]); } else { /* using 32-bit version */ res = gu_spooky32 (buf, i); } if (res != expected[i]) { ck_abort_msg("%zu: expected: 0x%.8" PRIX64 " , found: 0x%.8" PRIX32, i, expected[i], res); } } } END_TEST Suite *gu_spooky_suite(void) { Suite *s = suite_create("Spooky hash"); TCase *tc = tcase_create("gu_spooky"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_spooky_test); return s; } galera-4-26.4.25/galerautils/tests/gu_progress_test.cpp000644 000164 177776 00000003767 15107057155 024311 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2021 Codership Oy */ #include "../src/gu_progress.hpp" #include #include #include // std::pair #include #include #include // to sleep in C++ style class callback : public gu::Progress::Callback { std::vector > expect_; public: callback() : expect_() { expect_.push_back(std::pair(3,3)); expect_.push_back(std::pair(2,3)); expect_.push_back(std::pair(1,2)); expect_.push_back(std::pair(0,2)); } void operator()(int const first, int const second) { std::pair const exp(expect_.back()); bool const equal(exp == std::pair(first, second)); ck_assert_msg(equal, "Expected (%d, %d), got (%d, %d)", exp.first, exp.second, first, second); 
expect_.pop_back(); } }; START_TEST(progress) { callback cb; { /* Ctor calls event callback for the first time */ gu::Progress prog(&cb, "Testing", " units", 2, 1); /* This calls event callback for the second time. Need to sleep * a second here due to certain rate limiting in progress object */ std::this_thread::sleep_for(std::chrono::milliseconds(1000)); prog.update(1); /* THis extends the amount of "work" by 1 * (to test "crawling" progress of catching up, for example) */ prog.update_total(1); /* THis calls event callback for the 3rd time */ std::this_thread::sleep_for(std::chrono::milliseconds(1000)); prog.update(1); prog.finish(); /* Progress dtor calls event callback for the 4th time */ } } END_TEST Suite* progress_suite() { Suite* s = suite_create ("progress_suite"); TCase* tc; tc = tcase_create ("progress_case"); tcase_add_test (tc, progress); tcase_set_timeout(tc, 60); suite_add_tcase (s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_vector_test.cpp000644 000164 177776 00000004333 15107057155 023735 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2020 Codership Oy * * $Id$ */ #include "../src/gu_vector.hpp" #include "gu_vector_test.hpp" START_TEST (simple_test) { // we are not to test the whole vector functionality, it is provided // by incorporated std::vector. 
We just need to see that allocator // works as expected gu::Vector v1; v1->reserve(12); ck_assert(v1->size() == 0); v1->push_back(12); ck_assert(v1->size() == 1); v1->resize(11); ck_assert(v1->size() == 11); ck_assert(v1.in_heap() == false); v1[10]=1; ck_assert(v1[10] == v1()[10]); gu::Vector v2(v1); ck_assert(v2->size() == v1->size()); ck_assert(v1[10] == v2[10]); ck_assert(&v1[10] != &v2[10]); v2[10]=2; ck_assert(v1[10] != v2[10]); v2() = v1(); ck_assert(v1[10] == v2[10]); ck_assert(&v1[0] != &v2[0]); ck_assert(v2.in_heap() == false); v2->resize(32); ck_assert(v2.in_heap() == true); ck_assert(v1.in_heap() == false); v2[25]=1; v1->resize(32); ck_assert(v1.in_heap() == true); v1[25]=2; ck_assert(v1[25] != v2[25]); } END_TEST START_TEST(size_test) { gu::Vector v; ck_assert(v.size() == 0); void* const ptr1(reinterpret_cast(1)); v.push_back(ptr1); ck_assert(v()[0] == ptr1); ck_assert(v[0] == ptr1); ck_assert(v().front() == ptr1); ck_assert(v.front() == ptr1); ck_assert(v().back() == ptr1); ck_assert(v.back() == ptr1); ck_assert_msg(v.size() == 1, "v.size() expected 1, got %zu", v.size()); void* const ptr2(reinterpret_cast(2)); v.push_back(ptr2); ck_assert(v()[0] == ptr1); ck_assert(v[0] == ptr1); ck_assert(v()[1] == ptr2); ck_assert(v[1] == ptr2); ck_assert(v().front() == ptr1); ck_assert(v.front() == ptr1); ck_assert(v().back() == ptr2); ck_assert(v.back() == ptr2); ck_assert_msg(v.size() == 2, "v.size() expected 2, got %zu", v.size()); ck_assert(v.in_heap()); } END_TEST Suite* gu_vector_suite(void) { TCase* t = tcase_create ("simple_test"); tcase_add_test (t, simple_test); tcase_add_test (t, size_test); Suite* s = suite_create ("gu::Vector"); suite_add_tcase (s, t); return s; } galera-4-26.4.25/galerautils/tests/deqmap_bench.cpp000644 000164 177776 00000035204 15107057155 023310 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2020 Codership Oy */ /** * This is to benchmark some iteration/erase/insert operations of gu::DecMap * and comparing those to std::map 
*/ #define NDEBUG 1 #include #include "../src/gu_deqmap.hpp" #include "../src/gu_limits.h" // GU_PAGE_SIZE #include #include #include #include #include #include #include static double time_diff(const struct timeval& l, const struct timeval& r) { double const left(double(l.tv_usec)*1.0e-06 + l.tv_sec); double const right(double(r.tv_usec)*1.0e-06 + r.tv_sec); return left - right; } typedef int64_t Key; typedef int64_t Val; typedef std::map StdMap; typedef gu::DeqMap DeqMap; #if 0 // unused ATM static inline std::ostream& operator<<(std::ostream& os, const StdMap& m) { os << "std::map(size: " << m.size(); os << ", begin: "; m.size() ? os << m.begin()->first : os << "n/a"; os << ", end: "; m.size() ? os << m.rbegin()->first : os << "n/a"; os << ", front: "; m.size() ? os << m.begin()->second : os << "n/a"; os << ", back: "; m.size() ? os << m.rbegin()->second : os << "n/a"; os << ')'; return os; } #endif static inline Val& iterator2ref(DeqMap::iterator it) { return *it; } static inline Val& iterator2ref(DeqMap::reverse_iterator it) { return *it; } static inline Val& iterator2ref(StdMap::iterator it) { return it->second; } static inline Val& iterator2ref(StdMap::reverse_iterator it) { return it->second; } template struct FillByInsert; template <> struct FillByInsert { FillByInsert(StdMap& map, Key const size) { StdMap::iterator hint(map.begin()); for (Key i(0); i < size; ++i) { hint = map.insert(hint, std::pair(i, i)); } } }; template <> struct FillByInsert { FillByInsert(DeqMap& map, Key const size) { for (Key i(0); i < size; ++i) { map.insert(map.end(), i); } } }; template struct FillByIndex; template <> struct FillByIndex { FillByIndex(DeqMap& map, Key const size) { for (Key i(0); i < size; ++i) { map.insert(i, i); } } }; template struct FillByPushFront { FillByPushFront(Map& map, Key const size) { map.clear(size); // initialize begin_ for (Key i(size - 1); i >= 0; --i) { map.push_front(i); } } }; template struct FillByPushBack { FillByPushBack(Map& map, Key const 
size) { for (Key i(0); i < size; ++i) { map.push_back(i); } } }; template struct AccessByDirectIterator { AccessByDirectIterator(Map& map, Key) { Key val(reinterpret_cast(&map)); for (typename Map::iterator it(map.begin()); it != map.end(); ++it, ++val) { iterator2ref(it) = val; } } }; template struct AccessByReverseIterator { AccessByReverseIterator(Map& map, Key) { Key val(reinterpret_cast(&map)); for (typename Map::reverse_iterator it(map.rbegin()); it != map.rend(); ++it, ++val) { iterator2ref(it) = val; } } }; template struct AccessByRandom; // operator[] template <> struct AccessByRandom { AccessByRandom(StdMap& map, Key) { Key val(reinterpret_cast(&map)); for (Key i(map.begin()->first); i <= map.rbegin()->first; ++i, ++val) { map[i] = val; } } }; template <> struct AccessByRandom { AccessByRandom(DeqMap& map, Key) { Key val(reinterpret_cast(&map)); for (Key i(map.index_begin()); i < map.index_end(); ++i, ++val) { map.insert(i, val); } } }; template struct RotateByEraseInsert; template <> struct RotateByEraseInsert { RotateByEraseInsert(StdMap& map, Key) { Key const first(map.rbegin()->first + 1); Key const last (first + map.size()); StdMap::iterator hint(--map.end()); for (Key next(first); next < last; ++next) { map.erase(map.begin()); hint = map.insert(hint, std::pair(next, next)); } } }; template <> struct RotateByEraseInsert { RotateByEraseInsert(DeqMap& map, Key) { Key const first(map.index_end()); Key const last (first + map.size()); for (Key next(first); next < last; ++next) { map.erase(map.begin()); map.insert(map.end(), next); } } }; template struct RotateByPopPush; template <> struct RotateByPopPush { RotateByPopPush(DeqMap& map, Key) { Key const first(map.index_end()); Key const last (first + map.size()); for (Key next(first); next < last; ++next) { map.pop_front(); map.push_back(next); } } }; template struct ClearByClear; template <> struct ClearByClear { ClearByClear(StdMap& map, Key) { map.clear(); } }; template <> struct ClearByClear { 
ClearByClear(DeqMap& map, Key) { map.clear(0); } }; template struct ClearByErase { ClearByErase(Map& map, Key) { map.erase(map.begin(), map.end()); } }; template struct ClearByDirectIterator { ClearByDirectIterator(Map& map, Key) { while(!map.empty()) { map.erase(map.begin()); } } }; template struct ClearByReverseIterator { ClearByReverseIterator(Map& map, Key) { while(!map.empty()) { map.erase(--map.end()); } } }; template struct ClearByPopFront { ClearByPopFront(Map& map, Key) { while(!map.empty()) { map.pop_front(); } } }; template struct ClearByPopBack { ClearByPopBack(Map& map, Key) { while(!map.empty()) { map.pop_back(); } } }; template class Operation> double timing(Map& map, Key size) { struct timeval tv_begin, tv_end; gettimeofday(&tv_begin, NULL); Operation(map, size); gettimeofday(&tv_end, NULL); return time_diff(tv_end, tv_begin); } static void mem_stats_bytes(double& VmSize, double& VmRSS, double& VmData) { static size_t const page_size(GU_PAGE_SIZE); int size, rss, shared, text, unused, data; std::ifstream statm("/proc/self/statm"); statm >> size >> rss >> shared >> text >> unused >> data; statm.close(); VmSize = size * page_size; VmRSS = rss * page_size; VmData = data * page_size; } struct Metric { double val; int count; void record(double t) { val += t; ++count; } }; // associative array that maps metric string id to a vector of metric records // for different container sizes typedef std::map > Metrics; static void record(Metrics& m, const char* const id, size_t const power, double const val) { std::vector& v(m[id]); // this check is done every time here in order to be able to add records // ad hoc, without the need to know overall testing plan if (power >= v.size()) v.resize(power + 1); v[power].record(val); } static void record_mem_stats(Metrics& m, size_t const power) { double VmSize, VmRSS, VmData; mem_stats_bytes(VmSize, VmRSS, VmData); #define RECORD(STAT) record(m, #STAT"/byte", power, STAT/sizeof(Val)); RECORD(VmSize); RECORD(VmRSS); 
RECORD(VmData); #undef RECORD } #define ASSERT_SIZE(S) \ if (map.size() != size_t(S)) { \ std::cout << "ASSERT_SIZE failed: expected: " << S << ", found: " \ << map.size() << " at line " << __LINE__ << std::endl; \ abort(); \ } // benchmarking loop for std::map container struct StdLoop { static void run(Metrics& m, Key const size, int const power) { #define MEASURE(OP) record(m, #OP, power, timing(map, size)); { StdMap map; MEASURE(FillByInsert ); ASSERT_SIZE(size); MEASURE(AccessByDirectIterator ); MEASURE(AccessByReverseIterator); record_mem_stats(m, power); MEASURE(AccessByRandom ); MEASURE(ClearByClear ); ASSERT_SIZE(0); } { StdMap map; MEASURE(FillByInsert ); MEASURE(RotateByEraseInsert ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByErase ); ASSERT_SIZE(0); } { StdMap map; MEASURE(FillByInsert ); MEASURE(RotateByEraseInsert ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByDirectIterator ); ASSERT_SIZE(0); } { StdMap map; MEASURE(FillByInsert ); MEASURE(RotateByEraseInsert ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByReverseIterator ); ASSERT_SIZE(0); } #undef MEASURE } }; // struct StdLoop // benchmarking loop for gu::DeqMap container struct DeqLoop { static void run(Metrics& m, Key const size, int const power) { #define MEASURE(OP) record(m, #OP, power, timing(map, size)); { DeqMap map(0); MEASURE(FillByInsert ); ASSERT_SIZE(size); MEASURE(AccessByDirectIterator ); MEASURE(AccessByReverseIterator); record_mem_stats(m, power); MEASURE(AccessByRandom ); MEASURE(RotateByEraseInsert ); ASSERT_SIZE(size); MEASURE(ClearByClear ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByPushFront ); MEASURE(RotateByEraseInsert ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByPopFront ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByPushBack ); MEASURE(RotateByEraseInsert ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByPopBack ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByIndex ); 
MEASURE(RotateByPopPush ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByErase ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByInsert ); MEASURE(RotateByPopPush ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByDirectIterator ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByIndex ); MEASURE(RotateByPopPush ); record_mem_stats(m, power); ASSERT_SIZE(size); MEASURE(ClearByReverseIterator ); ASSERT_SIZE(0); } { DeqMap map(0); MEASURE(FillByPushBack ); } { DeqMap map(0); MEASURE(FillByPushFront ); } #undef MEASURE } }; #undef ASSERT_SIZE static Key power_size(Key base_size, int power) { return base_size << power; } static void print_metrics(Metrics& metrics, Key const base_size, const char* const title) { std::cout << "================" << std::endl; std::cout << title << std::endl; int const columns(metrics.begin()->second.size()); std::cout << "Size(M):"; for (int c(0); c < columns; ++c) { std::cout << '\t' << (1 << c); } std::cout << std::endl; for (Metrics::iterator m(metrics.begin()); m != metrics.end(); ++m) { std::cout << m->first; std::vector& v(m->second); for (size_t p(0); p < v.size(); ++p) { std::cout << '\t' << v[p].val/v[p].count/power_size(base_size, p); } std::cout << std::endl; } std::cout << "----------------" << std::endl; } template void loops(Key const base_size, int const base_loops, int const max_power, const char* const title) { Metrics m; Key const max_size(power_size(base_size, max_power)); struct timeval tv_begin, tv_end; gettimeofday(&tv_begin, NULL); for (int power(0); power <= max_power; ++power) { std::cout << "Power: " << power << std::endl; Key const size(power_size(base_size, power)); // compensate shorter sizes with more loops int const loops(base_loops * (max_size/size)); std::cout << "Loops(" << loops << "):" << std::flush; for (int l(1); l <= loops; ++l) { Loop::run(m, size, power); std::cout << ' ' << l << std::flush; } std::cout << std::endl; } gettimeofday(&tv_end, NULL); std::cout << "Total 
time spent: " << time_diff(tv_end, tv_begin) << std::endl; print_metrics(m, base_size, title); } template void read_arg(char* argv[], int position, T& var) { std::string arg(argv[position]); std::istringstream is(arg); is >> var; } int main(int argc, char* argv[]) { static const char* const DEQ = "deq"; static const char* const MAP = "map"; Key const base_size(1 << 20); // 1M std::string container(DEQ); int max_power(0); int base_loops(1); if (argc >= 2) read_arg(argv, 1, container); if (argc >= 3) read_arg(argv, 2, max_power); if (argc >= 4) read_arg(argv, 3, base_loops); std::cout << "Running with parameters: container type = " << container << ", max power = " << max_power << ", base loops = " << base_loops << '\n'; // struct timeval tv_begin, tv_end; // gettimeofday(&tv_begin, NULL); if (container == DEQ) loops(base_size, base_loops, max_power, "gu::DeqMap"); else if (container == MAP) loops(base_size, base_loops, max_power, "std::map"); else { std::cerr << "First option should be either '" << DEQ << "' or '" << MAP << "'" << std::endl; return 1; } // gettimeofday(&tv_end, NULL); // std::cout << "Total time spent: " << time_diff(tv_end, tv_begin) // << std::endl; return 0; } galera-4-26.4.25/galerautils/tests/CMakeLists.txt000644 000164 177776 00000005170 15107057155 022735 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # # # C galerautils tests. # add_executable(gu_tests gu_tests.c gu_crc32c_test.c gu_mem_test.c gu_bswap_test.c gu_fnv_test.c gu_mmh3_test.c gu_spooky_test.c gu_hash_test.c gu_time_test.c gu_fifo_test.c gu_uuid_test.c gu_dbug_test.c gu_lock_step_test.c gu_str_test.c gu_utils_test.c ) # TODO: These should be eventually fixed. # - Wno-unused-parameter # # Suppress -Wself-assign which may emit unwanted warnings when # using byte swapping macros in way like k = gu_le64(k). 
# target_compile_options(gu_tests PRIVATE -Wno-unused-parameter -Wno-declaration-after-statement -Wno-vla ) target_link_libraries(gu_tests galerautils ${GALERA_UNIT_TEST_LIBS}) add_test( NAME gu_tests COMMAND gu_tests ) # # C++ galerautils tests. # add_executable(gu_tests++ gu_atomic_test.cpp gu_gtid_test.cpp gu_vector_test.cpp gu_string_test.cpp gu_vlq_test.cpp gu_digest_test.cpp gu_mem_pool_test.cpp gu_alloc_test.cpp gu_rset_test.cpp gu_utils_test++.cpp gu_string_utils_test.cpp gu_uri_test.cpp gu_config_test.cpp gu_net_test.cpp gu_datetime_test.cpp gu_histogram_test.cpp gu_stats_test.cpp gu_thread_test.cpp gu_asio_test.cpp gu_deqmap_test.cpp gu_progress_test.cpp gu_tests++.cpp ) target_compile_definitions(gu_tests++ PRIVATE -DGU_ASIO_TEST_CERT_DIR="${CMAKE_CURRENT_BINARY_DIR}/certs") # TODO: These should be eventually fixed. target_compile_options(gu_tests++ PRIVATE -Wno-conversion -Wno-unused-parameter ) target_link_libraries(gu_tests++ galerautilsxx ${GALERA_UNIT_TEST_LIBS} ) add_test( NAME gu_tests++ COMMAND gu_tests++ ) # # Deqmap micro benchmark. # add_executable(deqmap_bench deqmap_bench.cpp) target_compile_options(deqmap_bench PRIVATE -Wno-conversion) target_link_libraries(deqmap_bench galerautilsxx rt) # # CRC32C micro benchmark. # add_executable(crc32c_bench crc32c_bench.cpp) target_compile_options(crc32c_bench PRIVATE -Wno-conversion) target_link_libraries(crc32c_bench galerautilsxx) # # Hash implementation micro benchmark. # add_executable(avalanche avalanche.c) target_compile_options(avalanche PRIVATE -Wno-conversion -Wno-unused-parameter -Wno-declaration-after-statement -Wno-vla) target_link_libraries(avalanche galerautils) # # Test for old TO monitor implementation, should be removed once all of the # code which uses gu_to module has been removed. 
# add_executable(gu_to_test gu_to_test.c) target_link_libraries(gu_to_test galerautils) target_compile_options(gu_to_test PRIVATE -Wno-conversion -Wno-declaration-after-statement -Wno-vla) galera-4-26.4.25/galerautils/tests/gu_shared_ptr_test.cpp000644 000164 177776 00000003541 15107057155 024566 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2015 Codership Oy // #include "gu_shared_ptr.hpp" #include "gu_logger.hpp" #include "gu_datetime.hpp" #include "gu_shared_ptr_test.hpp" typedef gu::shared_ptr::type LongPtr; static void __attribute__((noinline)) pass_by_value(LongPtr val, long& acc) { acc += *val; } static void __attribute__((noinline)) pass_by_const_ref(const LongPtr& val, long& acc) { acc += *val; } static LongPtr __attribute__((noinline)) construct_and_ret(long i) { return LongPtr(new long(i)); } static double to_sec(const gu::datetime::Period& p) { return double(p.get_nsecs())/gu::datetime::Sec; } START_TEST(shared_ptr_test) { long acc(0); long iters(10000); gu::datetime::Date start(gu::datetime::Date::monotonic()); LongPtr longptr(new long(0)); for (long i(0); i < iters; ++i) { *longptr = i; pass_by_value(longptr, acc); } gu::datetime::Date end(gu::datetime::Date::monotonic()); log_info << "add_by_val: " << acc << " " << iters/to_sec(end - start); start = gu::datetime::Date::monotonic(); for (long i(0); i < iters; ++i) { *longptr = i; pass_by_const_ref(longptr, acc); } end = gu::datetime::Date::monotonic(); log_info << "add_by_const_ref: " << acc << " " << iters/to_sec(end - start); start = gu::datetime::Date::monotonic(); for (long i(0); i < iters; ++i) { LongPtr longptr(construct_and_ret(i)); acc += *longptr; } end = gu::datetime::Date::monotonic(); log_info << "construct_and_ret: " << acc << " " << iters/to_sec(end - start); } END_TEST Suite* gu_shared_ptr_suite(void) { Suite* s(suite_create("galerautils++ shared_ptr")); TCase* tc(tcase_create("shared_ptr")); suite_add_tcase(s, tc); tcase_add_test(tc, shared_ptr_test); return s; } 
galera-4-26.4.25/galerautils/tests/gu_datetime_test.hpp000644 000164 177776 00000000341 15107057155 024227 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * $Id$ */ #ifndef __gu_datetime_test_hpp__ #define __gu_datetime_test_hpp__ #include Suite* gu_datetime_suite(); #endif // __gu_datetime_test_hpp__ galera-4-26.4.25/galerautils/tests/gu_uri_test.hpp000644 000164 177776 00000000307 15107057155 023234 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_uri_test__ #define __gu_uri_test__ #include extern Suite *gu_uri_suite(void); #endif /* __gu_uri_test__ */ galera-4-26.4.25/galerautils/tests/gu_thread_test.cpp000644 000164 177776 00000002701 15107057155 023677 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2016-2020 Codership Oy // #include "gu_thread.hpp" #include #include "gu_thread_test.hpp" START_TEST(check_thread_schedparam_parse) { gu::ThreadSchedparam sp_other(SCHED_OTHER, 0); std::ostringstream oss; oss << sp_other; ck_assert_msg(oss.str() == "other:0", "'%s'", oss.str().c_str()); oss.str(""); gu::ThreadSchedparam sp_fifo(SCHED_FIFO, 95); oss << sp_fifo; ck_assert_msg(oss.str() == "fifo:95", "'%s'", oss.str().c_str()); oss.str(""); gu::ThreadSchedparam sp_rr(SCHED_RR, 96); oss << sp_rr; ck_assert_msg(oss.str() == "rr:96", "'%s'", oss.str().c_str()); } END_TEST START_TEST(check_thread_schedparam_system_default) { gu::ThreadSchedparam sp(gu::thread_get_schedparam(gu_thread_self())); std::ostringstream sp_oss; sp_oss << sp; std::ostringstream system_default_oss; system_default_oss << gu::ThreadSchedparam::system_default; ck_assert_msg(sp == gu::ThreadSchedparam::system_default, "sp '%s' != system default '%s'", sp_oss.str().c_str(), system_default_oss.str().c_str()); } END_TEST Suite* gu_thread_suite() { Suite* s(suite_create("galerautils Thread")); TCase* tc(tcase_create("schedparam")); suite_add_tcase(s, tc); tcase_add_test(tc, check_thread_schedparam_parse); tcase_add_test(tc, 
check_thread_schedparam_system_default); return s; } galera-4-26.4.25/galerautils/tests/copy_vs_assignment.cpp000644 000164 177776 00000013060 15107057155 024610 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2017 Codership Oy // This program compares performance of assignemnt operator vs. std::copy() // with both aligned and unaligned arguments. /* * Overall findings are (GCC-6.3, clang-3.8): * - 8-byte alignment is as good as 16-byte for 16-byte args. * - as rule of thumb std::copy() is MUCH slower on debug builds, * while ::memcpy() is not. * - as rule of thumb std::copy() is as fast (or faster) on optimized builds. * In particular: * - on x86/GCC std::copy() is 2-5x faster than = operator for 16-byte args. * - on armhf/GCC std::copy() is 2 orders of magniture faster than = * operator for 8-byte unaligned args and ~2x slower for 8-byte aligned. * - on x86 non-aligned access is significatntly (3x) slower than aligned, * there is almost no dependence of the penalty on the argument size. * - on armhf there is no difference between aligned and non-aligned * access except in case of 8 and 16 byte sized arguments. * - non-aligned 8-byte assignement on armhf is TWO ORDERS of magnitude slower * than std::copy() - probably due to a bug in GCC, clang does not show it. * - clang does not optimize std::copy() as good as GCC. * - GCC optimization is very sensitive to syntax. Something that is logically * equivalent may compile into something 4x slower. See examples below. * * Conclusions: * 1) default assignment operator drawbacks: * a) slower at bigger (bigger than the word size) data types * b) much slower and/or bus error/unknown command on non-aligned access * on some platforms (armhf/clang) * 2) std::copy() drawbacks: * a) much lower on non-optimized builds * b) optimization is sensitive to exact syntax, seemingly equivalent * expression may lead to 2x worse performance. 
* 3) ::memcpy() drawback: * it can be marginally slower than std::copy() (few percent) on optimized * builds. * * WINNER: ::memcpy()! * */ #define NDEBUG 1 #include #include #include #include // memcmp(), memcpy() #include "../src/gu_arch.h" #include "../src/gu_uuid.hpp" static double time_diff(const struct timeval& l, const struct timeval& r) { double const left(double(l.tv_usec)*1.0e-06 + l.tv_sec); double const right(double(r.tv_usec)*1.0e-06 + r.tv_sec); return left - right; } enum METHOD { ASSIGN, STDCOPY, MEMCPY }; template double timing() { std::string method; switch (m) { case ASSIGN: method = "assignment"; break; case STDCOPY: method = "std::copy()"; break; case MEMCPY: method = "::memcpy()"; break; } std::cout << "Timing " << (aligned ? "aligned " : "non-aligned ") << sizeof(T) << "-byte " << method << ":\t" << std::flush; int const loops((1 << 16)); static int const arr_size(1024); int const increment(aligned ? std::min(sizeof(T), GU_WORD_BYTES) : 1); char a1[arr_size + sizeof(T)] = { 1, }, a2[arr_size + sizeof(T)] = { 2, }; struct timeval tv_start, tv_end; gettimeofday(&tv_start, NULL); for (int l(0); l < loops; ++l) { int i(0); int j(arr_size / 2); for (int k(0); k < arr_size; ++k) { T t1((T())), t2((T())); T* const p1i(reinterpret_cast(a1 + i)); T* const p2j(reinterpret_cast(a2 + j)); switch (m) { case ASSIGN: t1 = *p1i; t2 = *p2j; *p1i = t2; *p2j = t1; break; case STDCOPY: std::copy(p1i, p1i + 1, &t1); std::copy(p2j, p2j + 1, &t2); std::copy(&t1, &t1 + 1, p2j); std::copy(&t2, &t2 + 1, p1i); break; case MEMCPY: ::memcpy(&t1, p1i, sizeof(T)); ::memcpy(&t2, p2j, sizeof(T)); ::memcpy(p2j, &t1, sizeof(T)); ::memcpy(p1i, &t2, sizeof(T)); break; } i = (i + increment) % arr_size; j = (j + increment) % arr_size; } } gettimeofday(&tv_end, NULL); double const ret(time_diff(tv_end, tv_start)); std::cout << ret << std::endl; return ret; } template void timing_type() { double a, c, m __attribute__((unused)); a = timing (); c = timing(); m = timing (); std::cout << 
"Diff(ass/copy): " << 2*(a - c)/(a + c) << std::endl; a = timing (); c = timing(); m = timing (); std::cout << "Diff(ass/copy): " << 2*(a - c)/(a + c) << std::endl; } /* a 16-byte type to compare assignment operator and std::copy() */ struct hexe { char a[16]; GU_FORCE_INLINE hexe& operator= (const hexe& h) { ::memcpy(a, h.a, sizeof(a)); // const char* const from(h.a); // std::copy(from, from + sizeof(a), a); // Surprisingly the following syntaxes turn out MUCH slower with GCC while // clang does not show anything like that, but then clang is generally slower // std::copy(h.a, h.a + sizeof(a), a); // std::copy(&h.a[0], &h.a[0 + sizeof(a)], a); // std::copy(&h.a[0], &h.a[0] + sizeof(a), a); //if (::memcmp(&h.a[0], &a[0], 16)) { abort(); } return *this; } }; int main() { timing_type(); timing_type(); timing_type(); timing_type(); timing_type(); timing_type(); return 0; } galera-4-26.4.25/galerautils/tests/gu_gtid_test.cpp000644 000164 177776 00000002611 15107057155 023357 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2015-2020 Codership Oy #include "../src/gu_gtid.hpp" #include #include "gu_gtid_test.hpp" START_TEST(gtid) { gu::GTID g0; ck_assert(g0.uuid() == GU_UUID_NIL); ck_assert(g0.seqno() == gu::GTID::SEQNO_UNDEFINED); ck_assert(g0.is_undefined() == true); gu::UUID const u(NULL, 0); gu::seqno_t const s(1234); gu::GTID g1(u, s); ck_assert(g0 != g1); ck_assert(g1.uuid() == u); ck_assert(g1.seqno() == s); ck_assert(g1.uuid() != g0.uuid()); ck_assert(g1.seqno() != g0.seqno()); ck_assert(!g1.is_undefined()); gu::GTID g2(g1); ck_assert(g1 == g2); gu::byte_t buf[27]; size_t const buflen(sizeof(buf)); ck_assert(buflen >= gu::GTID::serial_size()); size_t const offset(3); size_t const offset2(g2.serialize(buf, buflen, offset)); size_t const offset0(g0.unserialize(buf, buflen, offset)); ck_assert(offset2 == offset0); ck_assert(offset2 == (offset + gu::GTID::serial_size())); ck_assert(g0 == g2); std::stringstream os; os << g0; gu::GTID g3; os >> g3; ck_assert(g3 == 
g1); ck_assert(g3.uuid() == u); ck_assert(g3.seqno() == s); } END_TEST Suite* gu_gtid_suite(void) { Suite* const s(suite_create("gu::GTID")); TCase* const t(tcase_create("gtid")); suite_add_tcase(s, t); tcase_add_test(t, gtid); return s; } galera-4-26.4.25/galerautils/tests/gu_mem_pool_test.cpp000644 000164 177776 00000002332 15107057155 024237 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2020 Codership Oy // $Id$ #define TEST_SIZE 1024 #include "gu_mem_pool.hpp" #include "gu_mem_pool_test.hpp" START_TEST (unsafe) { gu::MemPoolUnsafe mp(10, 1, "unsafe"); void* const buf0(mp.acquire()); ck_assert(NULL != buf0); void* const buf1(mp.acquire()); ck_assert(NULL != buf1); ck_assert(buf0 != buf1); mp.recycle(buf0); void* const buf2(mp.acquire()); ck_assert(NULL != buf2); ck_assert(buf0 == buf2); log_info << mp; mp.recycle(buf1); mp.recycle(buf2); } END_TEST START_TEST (safe) { gu::MemPoolSafe mp(10, 1, "safe"); void* const buf0(mp.acquire()); ck_assert(NULL != buf0); void* const buf1(mp.acquire()); ck_assert(NULL != buf1); ck_assert(buf0 != buf1); mp.recycle(buf0); void* const buf2(mp.acquire()); ck_assert(NULL != buf2); ck_assert(buf0 == buf2); log_info << mp; mp.recycle(buf1); mp.recycle(buf2); } END_TEST Suite *gu_mem_pool_suite(void) { Suite *s = suite_create("gu::MemPool"); TCase *tc_mem = tcase_create("gu_mem_pool"); suite_add_tcase (s, tc_mem); tcase_add_test(tc_mem, unsafe); tcase_add_test(tc_mem, safe); return s; } galera-4-26.4.25/galerautils/tests/gu_mem_test.c000644 000164 177776 00000003654 15107057155 022656 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #define DEBUG_MALLOC // turn on the debugging code #define TEST_SIZE 1024 #include #include #include #include #include "gu_mem_test.h" #include "../src/galerautils.h" START_TEST (gu_mem_test) { void* ptr1; void* ptr2; int res; int i; ptr1 = gu_malloc (0); ck_assert_msg(NULL == ptr1, "Zero memory allocated, non-NULL pointer returned"); mark_point(); ptr1 = 
gu_malloc (TEST_SIZE); ck_assert_msg(NULL != ptr1, "NULL pointer returned for allocation" " errno: %s", strerror (errno)); mark_point(); ptr2 = memset (ptr1, 0xab, TEST_SIZE); ck_assert_msg(ptr2 == ptr1, "Memset changed pointer"); ptr2 = NULL; mark_point(); ptr2 = gu_realloc (ptr2, TEST_SIZE); ck_assert_msg(NULL != ptr2, "NULL pointer returned for reallocation" " errno: %s", strerror (errno)); memcpy (ptr2, ptr1, TEST_SIZE); mark_point(); ptr1 = gu_realloc (ptr1, TEST_SIZE + TEST_SIZE); res = memcmp (ptr1, ptr2, TEST_SIZE); ck_assert_msg(res == 0, "Realloc changed the contents of the memory"); mark_point(); ptr1 = gu_realloc (ptr1, 0); ck_assert_msg(res == 0, "Realloc to 0 didn't return NULL"); mark_point(); ptr1 = gu_calloc (1, TEST_SIZE); ck_assert_msg(NULL != ptr1, "NULL pointer returned for allocation" " errno: %s", strerror (errno)); for (i = 0; i < TEST_SIZE; i++) { res = ((char*)ptr1)[i]; if (res != 0) break; } ck_assert_msg(res == 0, "Calloc didn't clear up the memory"); mark_point(); gu_free (ptr1); mark_point(); gu_free (ptr2); } END_TEST Suite *gu_mem_suite(void) { Suite *s = suite_create("Galera memory utils"); TCase *tc_mem = tcase_create("gu_mem"); suite_add_tcase (s, tc_mem); tcase_add_test(tc_mem, gu_mem_test); return s; } galera-4-26.4.25/galerautils/tests/gu_str_test.c000644 000164 177776 00000004753 15107057155 022711 0ustar00jenkinsnogroup000000 000000 /* * Copyright (c) 2010-2020 Codership Oy */ #include "gu_str.h" #include START_TEST(test_append) { const char* strs[3] = { "t", "ttt", "tttttttt" }; char* str = NULL; size_t off = 0; size_t i; for (i = 0; i < 3; ++i) { str = gu_str_append(str, &off, strs[i], strlen(strs[i])); } free(str); } END_TEST START_TEST(test_scan) { const char* strs[5] = { "1", "234", "56789abc", "4657777777777", "345" }; char* str = NULL; size_t off = 0; size_t len = 0; size_t i; const char* ptr; for (i = 0; i < 5; ++i) { str = gu_str_append(str, &off, strs[i], strlen(strs[i])); len += strlen(strs[i]) + 1; } ptr = str; 
for (i = 0; i < 5; ++i) { ck_assert(strcmp(ptr, strs[i]) == 0); ptr = gu_str_next(ptr); } ck_assert(ptr == len + str); for (i = 0; i < 5; ++i) { ptr = gu_str_advance(str, i); ck_assert(strcmp(ptr, strs[i]) == 0); } free(str); } END_TEST START_TEST(test_str_table) { size_t n_cols = 5; char const* col_names[5] = { "col1", "column2", "foo", "bar", "zzz" }; size_t n_rows = 255; const char* row[5] = {"dddd", "asdfasdf", "sadfdf", "", "a"}; const char* name = "test_table"; char* str = NULL; size_t off = 0; size_t i; str = gu_str_table_set_name(str, &off, name); ck_assert(strcmp(gu_str_table_get_name(str), name) == 0); str = gu_str_table_set_n_cols(str, &off, n_cols); ck_assert(gu_str_table_get_n_cols(str) == n_cols); str = gu_str_table_set_n_rows(str, &off, n_rows); ck_assert(gu_str_table_get_n_rows(str) == n_rows); str = gu_str_table_set_cols(str, &off, n_cols, col_names); for (i = 0; i < n_rows; ++i) { str = gu_str_table_append_row(str, &off, n_cols, row); } mark_point(); FILE* tmp = fopen("/dev/null", "w"); ck_assert(NULL != tmp); gu_str_table_print(tmp, str); fclose(tmp); free(str); } END_TEST Suite* gu_str_suite() { Suite* s = suite_create("Galera Str util suite"); TCase* tc; tc = tcase_create("test_append"); tcase_add_test(tc, test_append); suite_add_tcase(s, tc); tc = tcase_create("test_scan"); tcase_add_test(tc, test_scan); suite_add_tcase(s, tc); tc = tcase_create("test_str_table"); tcase_add_test(tc, test_str_table); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_config_test.cpp000644 000164 177776 00000003643 15107057155 023703 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2020 Codership Oy // $Id$ #include "../src/gu_config.hpp" #include "gu_config_test.hpp" static std::string const key("test_key"); static std::string const another_key("another_key"); static std::string const str_value("123"); static long long const int_value( 123 ); START_TEST (gu_config_test) { gu::Config cnf; std::string svalue; long long ivalue; 
ck_assert(cnf.has(key) == false); try { cnf.is_set(key); ck_abort_msg("gu::NotFound expected"); } catch(gu::NotFound&) {} cnf.add(key); ck_assert(cnf.has(key)); ck_assert(cnf.is_set(key) == false); #define SUFFIX_CHECK(_suf_,_shift_) \ svalue = str_value + _suf_; \ cnf.set(key, svalue); \ ck_assert(cnf.is_set(key)); \ ck_assert(cnf.get(key) == svalue); \ ivalue = cnf.get(key); \ ck_assert(ivalue == (int_value << _shift_)); SUFFIX_CHECK('T', 40); // check overflow checks try { ivalue = cnf.get(key); ck_abort_msg("gu::Exception expected"); } catch (gu::Exception&) {} try { ivalue = cnf.get(key); ck_abort_msg("gu::Exception expected"); } catch (gu::Exception&) {} try { ivalue = cnf.get(key); ck_abort_msg("gu::Exception expected"); } catch (gu::Exception&) {} SUFFIX_CHECK('G', 30); SUFFIX_CHECK('M', 20); SUFFIX_CHECK('K', 10); // try { cnf.add(key, str_value); ck_abort_msg("gu::Exception expected"); } // catch (gu::Exception& e) {} cnf.add(another_key, str_value); ck_assert(cnf.is_set(another_key)); ivalue = cnf.get(another_key); ck_assert(ivalue == int_value); } END_TEST Suite *gu_config_suite(void) { Suite *s = suite_create("gu::Config"); TCase *tc = tcase_create("gu_config_test"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_config_test); return s; } galera-4-26.4.25/galerautils/tests/gu_stats_test.hpp000644 000164 177776 00000000312 15107057155 023567 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef __gu_stats_test__ #define __gu_stats_test__ #include extern Suite *gu_stats_suite(void); #endif // __gu_stats_test__ galera-4-26.4.25/galerautils/tests/gu_tests++.cpp000644 000164 177776 00000002004 15107057155 022655 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2017 Codership Oy #include #include #include #include extern "C" { #include "../src/gu_conf.h" } #include "gu_tests++.hpp" int main(int argc, char* argv[]) { bool no_fork = (argc >= 2 && std::string(argv[1]) == "nofork"); FILE* log_file = 0; if (!no_fork) { 
log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); int failed = 0; for (int i = 0; suites[i] != 0; ++i) { SRunner* sr = srunner_create(suites[i]()); if (no_fork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all(sr, CK_NORMAL); failed += srunner_ntests_failed(sr); srunner_free(sr); } if (log_file != 0) fclose(log_file); printf ("Total tests failed: %d\n", failed); if (0 == failed && 0 != log_file) ::unlink(LOG_FILE); return failed == 0 ? EXIT_SUCCESS : EXIT_FAILURE; } galera-4-26.4.25/galerautils/tests/gu_utils_test.h000644 000164 177776 00000000264 15107057155 023237 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010 Codership Oy // $Id$ #ifndef __gu_utils_test__ #define __gu_utils_test__ Suite *gu_utils_suite(void); #endif /* __gu_utils_test__ */ galera-4-26.4.25/galerautils/tests/gu_atomic_test.hpp000644 000164 177776 00000000314 15107057155 023707 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2014 Codership Oy // $Id$ #ifndef __gu_atomic_test__ #define __gu_atomic_test__ #include Suite *gu_atomic_suite(void); #endif /* __gu_atomic_test__ */ galera-4-26.4.25/galerautils/tests/gu_deqmap_test.hpp000644 000164 177776 00000000312 15107057155 023700 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2020 Codership Oy #ifndef __gu_deqmap_test__ #define __gu_deqmap_test__ #include extern Suite *gu_deqmap_suite(void); #endif /* __gu_deqmap_test__ */ galera-4-26.4.25/galerautils/tests/gu_datetime_test.cpp000644 000164 177776 00000012461 15107057155 024230 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "gu_datetime.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" #include "gu_datetime_test.hpp" #include // std::fabs using namespace gu; using namespace gu::datetime; START_TEST(test_units) { ck_assert(NSec == 1LL); ck_assert(USec == 1000LL); ck_assert(MSec == 1000LL*1000LL); ck_assert(Sec == 1000LL*1000LL*1000LL); ck_assert(Min == 
60LL*1000LL*1000LL*1000LL); ck_assert(Hour == 60LL*60LL*1000LL*1000LL*1000LL); ck_assert(Day == 24LL*60LL*60LL*1000LL*1000LL*1000LL); ck_assert(Month == 30LL*24LL*60LL*60LL*1000LL*1000LL*1000LL); ck_assert(Year == 12LL*30LL*24LL*60LL*60LL*1000LL*1000LL*1000LL); } END_TEST START_TEST(test_period) { // Zero periods ck_assert(Period("").get_nsecs() == 0); ck_assert(Period("P").get_nsecs() == 0); ck_assert(Period("PT").get_nsecs() == 0); // Year-mon-day ck_assert(Period("P3Y").get_nsecs() == 3*Year); ck_assert(Period("P5M").get_nsecs() == 5*Month); ck_assert(Period("P37D").get_nsecs() == 37*Day); ck_assert(Period("P3Y17M").get_nsecs() == 3*Year + 17*Month); ck_assert(Period("P5Y66D").get_nsecs() == 5*Year + 66*Day); ck_assert(Period("P37M44D").get_nsecs() == 37*Month + 44*Day); ck_assert(Period("P3YT").get_nsecs() == 3*Year); ck_assert(Period("P5MT").get_nsecs() == 5*Month); ck_assert(Period("P37DT").get_nsecs() == 37*Day); ck_assert(Period("P3Y17MT").get_nsecs() == 3*Year + 17*Month); ck_assert(Period("P5Y66DT").get_nsecs() == 5*Year + 66*Day); ck_assert(Period("P37M44DT").get_nsecs() == 37*Month + 44*Day); // Hour-min-sec ck_assert(Period("PT3H").get_nsecs() == 3*Hour); ck_assert(Period("PT5M").get_nsecs() == 5*Min); ck_assert(Period("P37S").get_nsecs() == 37*Sec); // Decimal seconds ck_assert(Period("PT0.5S").get_nsecs() == 500*MSec); ck_assert(Period("PT3.578777S").get_nsecs() == 3*Sec + 578*MSec + 777*USec); ck_assert(Period("PT5H7M3.578777S").get_nsecs() == 5*Hour + 7*Min + 3*Sec + 578*MSec + 777*USec); // Full ck_assert(Period("P10Y5M4DT3H24M1.1S").get_nsecs() == 10*Year + 5*Month + 4*Day + 3*Hour + 24*Min + 1.1*Sec); } END_TEST static void assert_invalid_period(const std::string& period) { bool exception = false; try { Period p(period); } catch (gu::NotFound& exp) { exception = true; } ck_assert(exception); } START_TEST(test_period_invalid) { assert_invalid_period("a"); assert_invalid_period("anyvalue"); assert_invalid_period("Panyvalue"); 
assert_invalid_period("PT.S"); assert_invalid_period("PT.D"); assert_invalid_period("PT1D"); assert_invalid_period("P1D1Y"); assert_invalid_period("P9223372036854775807Y"); // Overflow } END_TEST static void assert_double_eq_tol(double left, double right, double tol) { ck_assert(std::fabs(left - right) < tol); } START_TEST(test_period_from_double) { ck_assert(Period("0").get_nsecs() == 0); ck_assert(Period(".1").get_nsecs() == 100*MSec); ck_assert(Period("0.0").get_nsecs() == 0); ck_assert(Period("0.5").get_nsecs() == 500*MSec); // Use microsecond precision for comparison to make // it work on x86 assert_double_eq_tol(to_double(Period("0.5")), 0.5, 0.000001); assert_double_eq_tol(to_double(Period(".111111111")), 0.111111111, 0.000001);; } END_TEST START_TEST(test_period_overflow) { long long max_secs = std::numeric_limits::max() / gu::datetime::Sec; std::string max_duration("PT" + std::to_string(max_secs) + "S"); ck_assert(Period(max_duration).get_nsecs()); // no overflow std::string overflow_duration("PT" + std::to_string(max_secs + 1.0) + "S"); assert_invalid_period(overflow_duration); } END_TEST START_TEST(test_date) { Date d1(Date::monotonic()); Date d2 = d1 + Period("PT6S"); ck_assert(d2.get_utc() == d1.get_utc() + 6*Sec); ck_assert(d2 - Period("PT6S") == d1); Date max(Date::max()); ck_assert(d1 < max); } END_TEST START_TEST(test_trac_712) { try { Period p; p = gu::from_string("0x3"); // used to throw gu::Exception } catch (gu::NotFound& nf) { } } END_TEST Suite* gu_datetime_suite() { Suite* s = suite_create("gu::datetime"); TCase* tc; tc = tcase_create("test_units"); tcase_add_test(tc, test_units); suite_add_tcase(s, tc); tc = tcase_create("test_period"); tcase_add_test(tc, test_period); suite_add_tcase(s, tc); tc = tcase_create("test_period_invalid"); tcase_add_test(tc, test_period_invalid); suite_add_tcase(s, tc); tc = tcase_create("test_period_from_double"); tcase_add_test(tc, test_period_from_double); suite_add_tcase(s, tc); tc = 
tcase_create("test_period_overflow"); tcase_add_test(tc, test_period_overflow); suite_add_tcase(s, tc); tc = tcase_create("test_date"); tcase_add_test(tc, test_date); suite_add_tcase(s, tc); tc = tcase_create("test_trac_712"); tcase_add_test(tc, test_trac_712); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/galerautils/tests/gu_crc32c_test.c000644 000164 177776 00000010331 15107057155 023145 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013-2020 Codership Oy * * $Id$ */ #include "../src/gu_crc32c.h" #include "gu_crc32c_test.h" #include #define long_input \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" \ "0123456789abcdef0123456789ABCDEF" #define long_output 0x7e5806b3 struct test_pair { const char* input; uint32_t output; }; //#define test_vector_length 6 /* * boost::crc_optimal<32, 0x1EDC6F41, 0, 0, true, true> crc; */ static struct test_pair test_vector[] = { { "", 0x00000000 }, { "1", 0x90f599e3 }, { "22", 0x47b26cf9 }, { "333", 0x4cb6e5c8 }, { "4444", 0xfb8150f7 }, { "55555", 0x23874b2f }, { "666666", 0xfad65244 }, { "7777777", 0xe4cbaa36 }, { "88888888", 0xda8901c2 }, { "123456789", 0xe3069283 }, // taken from SCTP mailing list { "My", 0xc7600404 }, // taken from { "test", 0x86a072c0 }, // http://www.zorc.breitbandkatze.de/crc.html { "vector", 0xa0b8f38a }, { long_input, long_output}, { NULL, 0x0 } }; static void test_function(void) { int i; for (i = 0; test_vector[i].input != NULL; i++) { const char* const input = test_vector[i].input; uint32_t const output = test_vector[i].output; uint32_t ret = gu_crc32c(input, strlen(input)); ck_assert_msg(ret == output, "Input '%s' resulted in %#08x, expected %#08x\n", input, ret, output); } const char* const input = long_input; uint32_t const output = long_output; int const size = strlen(input); int offset = 0; gu_crc32c_t crc; gu_crc32c_init(&crc); #define CRC_APPEND(x) 
gu_crc32c_append(&crc, &input[offset], x); offset += x; CRC_APPEND(1); CRC_APPEND(3); CRC_APPEND(5); CRC_APPEND(7); CRC_APPEND(13); CRC_APPEND(15); mark_point(); CRC_APPEND(0); CRC_APPEND(27); CRC_APPEND(43); CRC_APPEND(64); int tail = size - offset; ck_assert(tail >= 0); CRC_APPEND(tail); uint32_t ret = gu_crc32c_get (crc); ck_assert_msg(ret == output, "Generated %#08x, expected %#08x\n", ret, output); } START_TEST(test_gu_crc32c_sarwate) { gu_crc32c_func = gu_crc32c_sarwate; test_function(); } END_TEST START_TEST(test_gu_crc32c_slicing_by_4) { gu_crc32c_func = gu_crc32c_slicing_by_4; test_function(); } END_TEST START_TEST(test_gu_crc32c_slicing_by_8) { gu_crc32c_func = gu_crc32c_slicing_by_8; test_function(); } END_TEST #if defined(GU_CRC32C_X86) START_TEST(test_gu_crc32c_x86) { gu_crc32c_func = gu_crc32c_x86; test_function(); } END_TEST #if defined(GU_CRC32C_X86_64) START_TEST(test_gu_crc32c_x86_64) { gu_crc32c_func = gu_crc32c_x86_64; test_function(); } END_TEST #endif /* GU_CRC32C_X86_64 */ #endif /* GU_CRC32C_X86 */ #if defined(GU_CRC32C_ARM64) START_TEST(test_gu_crc32c_arm64) { gu_crc32c_func = gu_crc32c_hardware(); if (NULL != gu_crc32c_func) { ck_assert(gu_crc32c_arm64 == gu_crc32c_func); test_function(); } } END_TEST #endif /* GU_CRC32C_ARM64 */ Suite *gu_crc32c_suite(void) { gu_crc32c_configure(); /* compute lookup tables for SW implementations */ Suite *suite = suite_create("CRC32C implementations"); TCase *t; t = tcase_create("gu_crc32c_sw"); suite_add_tcase (suite, t); tcase_add_test (t, test_gu_crc32c_sarwate); tcase_add_test (t, test_gu_crc32c_slicing_by_4); tcase_add_test (t, test_gu_crc32c_slicing_by_8); #if defined(GU_CRC32C_X86) t = tcase_create("gu_crc32c_hw_x86"); suite_add_tcase (suite, t); tcase_add_test (t, test_gu_crc32c_x86); #if defined(GU_CRC32C_X86_64) tcase_add_test (t, test_gu_crc32c_x86_64); #endif /* GU_CRC32C_X86_64 */ #endif /* GU_CRC32C_X86 */ #if defined(GU_CRC32C_ARM64) t = tcase_create("gu_crc32c_hw_arm64"); suite_add_tcase 
(suite, t); tcase_add_test (t, test_gu_crc32c_arm64); #endif /* GU_CRC32C_ARM64 */ return suite; } galera-4-26.4.25/galerautils/tests/gu_digest_test.hpp000644 000164 177776 00000000323 15107057155 023712 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_digest_test__ #define __gu_digest_test__ #include extern Suite *gu_digest_suite(void); #endif /* __gu_digest_test__ */ galera-4-26.4.25/galerautils/tests/gu_histogram_test.hpp000644 000164 177776 00000000332 15107057155 024430 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef __gu_histogram_test__ #define __gu_histogram_test__ #include extern Suite *gu_histogram_suite(void); #endif // __gu_histogram_test__ galera-4-26.4.25/galerautils/tests/gu_uri_test.cpp000644 000164 177776 00000036423 15107057155 023237 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #include #include #include "../src/gu_uri.hpp" #include "../src/gu_exception.hpp" #include "../src/gu_logger.hpp" #include "gu_uri_test.hpp" using std::string; using std::pair; using gu::URI; using gu::URIQueryList; using gu::NotSet; using gu::NotFound; using gu::Exception; START_TEST (uri_test1) // checking normal URI { const string scheme("scheme"); const string user ("user:pswd"); const string host ("[::ffff:192.168.0.1]"); // IPv4 over IPv6 const string port ("4567"); const string path ("/path1/path2"); const string opt1 ("opt1"); const string val1 ("val1"); const string opt2 ("opt2"); const string val2 ("val2"); const string query (opt1 + '=' + val1 + '&' + opt2 + '=' + val2); const string frag ("frag"); string auth = user + "@" + host + ":" + port; string uri_str = scheme + "://" + auth + path + "?" 
+ query + "#" + frag; try { URI uri(uri_str); try { ck_assert_msg(scheme == uri.get_scheme(), "Scheme '%s' != '%s'", scheme.c_str(), uri.get_scheme().c_str()); } catch (NotSet&) { ck_abort_msg("Scheme not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(user == uri.get_user(), "User info '%s' != '%s'", user.c_str(), uri.get_user().c_str()); } catch (NotSet&) { ck_abort_msg("User info not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(host == uri.get_host(), "Host '%s' != '%s'", host.c_str(), uri.get_host().c_str()); } catch (NotSet&) { ck_abort_msg("Host not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(port == uri.get_port(), "Port '%s' != '%s'", port.c_str(), uri.get_port().c_str()); } catch (NotSet&) { ck_abort_msg("Port not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(path == uri.get_path(), "Path '%s' != '%s'", path.c_str(), uri.get_path().c_str()); } catch (NotSet&) { ck_abort_msg("Path not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(frag == uri.get_fragment(), "Fragment '%s' != '%s'", frag.c_str(), uri.get_fragment().c_str()); } catch (NotSet&) { ck_abort_msg("Fragment not set in '%s'", uri_str.c_str()); } try { ck_assert_msg(auth == uri.get_authority(), "Authority '%s' != '%s'", auth.c_str(), uri.get_authority().c_str()); } catch (NotSet&) { ck_abort_msg("Authority not set in '%s'", uri_str.c_str()); } URIQueryList ql = uri.get_query_list(); ck_assert_msg(ql.size() == 2, "Query list size %zu, expected 2", ql.size()); URIQueryList::const_iterator i = ql.begin(); ck_assert_msg(i->first == opt1, "got option '%s', expected '%s'", i->first.c_str(), opt1.c_str()); ck_assert_msg(i->second == val1, "got value '%s', expected '%s'", i->second.c_str(), val1.c_str()); ++i; ck_assert_msg(i->first == opt2, "got option '%s', expected '%s'", i->first.c_str(), opt2.c_str()); ck_assert_msg(i->second == val2, "got value '%s', expected '%s'", i->second.c_str(), val2.c_str()); ck_assert(val1 == uri.get_option(opt1)); ck_assert(val2 == 
uri.get_option(opt2)); try { uri.get_option("xxx"); ck_abort_msg("Expected NotFound exception"); } catch (NotFound&) {} URI simple ("gcomm+pc://192.168.0.1"); } catch (Exception& e) { ck_abort_msg("%s", e.what()); } } END_TEST START_TEST (uri_test2) // checking corner cases { #ifdef NDEBUG try { URI uri(""); ck_abort_msg("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("scheme:"); } catch (Exception& e) { ck_abort_msg("URI should be valid."); } mark_point(); #ifdef NDEBUG try { URI uri(":path"); ck_abort_msg("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("a://b:c?d=e#f"); ck_abort_msg("URI should have failed."); } catch (Exception& e) {} mark_point(); try { URI uri("a://b:99999?d=e#f"); ck_abort_msg("URI should have failed."); } catch (Exception& e) {} mark_point(); #ifdef NDEBUG try { URI uri("?query"); ck_abort_msg("URI should have failed."); } catch (Exception& e) {} #endif mark_point(); try { URI uri("scheme:path"); try { uri.get_user(); ck_abort_msg("User should be unset"); } catch (NotSet&) {} try { uri.get_host(); ck_abort_msg("Host should be unset"); } catch (NotSet&) {} try { uri.get_port(); ck_abort_msg("Port should be unset"); } catch (NotSet&) {} try { uri.get_authority(); ck_abort_msg("Authority should be unset"); } catch (NotSet&) {} try { uri.get_fragment(); ck_abort_msg("Fragment should be unset"); } catch (NotSet&) {} ck_assert_msg(uri.get_query_list().size() == 0, "Query list must be empty"); } catch (Exception& e) { ck_abort_msg("%s", e.what()); } mark_point(); try { URI uri("scheme:///path"); try { ck_assert(uri.get_authority() == ""); } catch (NotSet&) { ck_abort_msg("Authority should be set"); } try { uri.get_host(); ck_abort_msg("Host should be unset"); } catch (NotSet&) { } try { uri.get_user(); ck_abort_msg("User should be unset"); } catch (NotSet&) {} try { uri.get_port(); ck_abort_msg("Port should be unset"); } catch (NotSet&) {} try { 
ck_assert(uri.get_path().length() == 5); } catch (NotSet&) { ck_abort_msg("Path should be 5 characters long"); } } catch (Exception& e) { ck_abort_msg("%s", e.what()); } mark_point(); try { URI uri("scheme://@/path"); try { ck_assert(uri.get_authority() == "@"); } catch (NotSet&) { ck_abort_msg("Authority should be set"); } try { ck_assert(uri.get_user() == ""); } catch (NotSet&) { ck_abort_msg("User should be set"); } try { ck_assert(uri.get_host() == ""); } catch (NotSet&) { ck_abort_msg("Host should be set"); } try { uri.get_port(); ck_abort_msg("Port should be unset"); } catch (NotSet&) {} } catch (Exception& e) { ck_abort_msg("%s", e.what()); } mark_point(); try { URI uri("scheme://@:/path"); try { ck_assert(uri.get_authority() == "@"); } catch (NotSet&) { ck_abort_msg("Authority should be set"); } try { ck_assert(uri.get_user() == ""); } catch (NotSet&) { ck_abort_msg("User should be set"); } try { ck_assert(uri.get_host() == ""); } catch (NotSet&) { ck_abort_msg("Host should be set"); } try { uri.get_port(); ck_abort_msg("Port should be unset"); } catch (NotSet&) {} } catch (Exception& e) { ck_abort_msg("%s", e.what()); } mark_point(); try { URI uri("scheme://"); try { ck_assert(uri.get_authority() == ""); } catch (NotSet&) { ck_abort_msg("Authority should be set"); } try { uri.get_user(); ck_abort_msg("User should be unset"); } catch (NotSet&) {} try { uri.get_host(); ck_abort_msg("Host should be unset"); } catch (NotSet&) { } try { uri.get_port(); ck_abort_msg("Port should be unset"); } catch (NotSet&) {} // According to http://tools.ietf.org/html/rfc3986#section-3.3 try { ck_assert(uri.get_path() == ""); } catch (NotSet&) { ck_abort_msg("Path should be set to empty"); } } catch (Exception& e) { ck_abort_msg("%s", e.what()); } } END_TEST START_TEST (uri_test3) // Test from gcomm { #ifdef NDEBUG try { URI too_simple("http"); ck_abort_msg("too simple accepted"); } catch (gu::Exception& e) { ck_assert(e.get_errno() == EINVAL); } #endif URI 
empty_auth("http://"); ck_assert(empty_auth.get_scheme() == "http"); ck_assert(empty_auth.get_authority() == ""); URI simple_valid1("http://example.com"); ck_assert(simple_valid1.get_scheme() == "http"); ck_assert(simple_valid1.get_authority() == "example.com"); ck_assert(simple_valid1.get_path() == ""); ck_assert(simple_valid1.get_query_list().size() == 0); URI with_path("http://example.com/path/to/file.html"); ck_assert(with_path.get_scheme() == "http"); ck_assert(with_path.get_authority() == "example.com"); ck_assert(with_path.get_path() == "/path/to/file.html"); ck_assert(with_path.get_query_list().size() == 0); URI with_query("http://example.com?key1=val1&key2=val2"); ck_assert(with_query.get_scheme() == "http"); ck_assert(with_query.get_authority() == "example.com"); ck_assert(with_query.get_path() == ""); const URIQueryList& qlist = with_query.get_query_list(); ck_assert(qlist.size() == 2); URIQueryList::const_iterator i; i = qlist.find("key1"); ck_assert(i != qlist.end() && i->second == "val1"); i = qlist.find("key2"); ck_assert(i != qlist.end() && i->second == "val2"); URI with_uri_in_query("gcomm+gmcast://localhost:10001?gmcast.node=gcomm+tcp://localhost:10002&gmcast.node=gcomm+tcp://localhost:10003"); ck_assert(with_uri_in_query.get_scheme() == "gcomm+gmcast"); ck_assert(with_uri_in_query.get_authority() == "localhost:10001"); const URIQueryList& qlist2 = with_uri_in_query.get_query_list(); ck_assert(qlist2.size() == 2); pair ii; ii = qlist2.equal_range("gmcast.node"); ck_assert(ii.first != qlist2.end()); for (i = ii.first; i != ii.second; ++i) { ck_assert(i->first == "gmcast.node"); URI quri(i->second); ck_assert(quri.get_scheme() == "gcomm+tcp"); ck_assert(quri.get_authority().substr(0, string("localhost:1000").size()) == "localhost:1000"); } try { URI invalid1("http://example.com/?key1"); ck_abort_msg("invalid query accepted"); } catch (gu::Exception& e) { ck_assert(e.get_errno() == EINVAL); } } END_TEST START_TEST(uri_non_strict) { std::string const 
ip("1.2.3.4"); std::string const port("789"); std::string const addr(ip + ':' + port); try { URI u(ip); ck_abort_msg("Strict mode passed without scheme"); } catch (gu::Exception& e) { ck_assert_msg(e.get_errno() == EINVAL, "Expected errno %d, got %d", EINVAL, e.get_errno()); } try { URI u(addr, false); ck_assert(u.get_host() == ip); ck_assert(u.get_port() == port); try { u.get_scheme(); ck_abort_msg("Scheme is '%s', should be unset", u.get_scheme().c_str()); } catch (gu::NotSet&) {} } catch (gu::Exception& e) { ck_assert(e.get_errno() == EINVAL); } } END_TEST START_TEST(uri_test_multihost) { try { gu::URI uri("tcp://host1,host2"); ck_assert(uri.get_authority_list().size() == 2); try { uri.get_authority_list()[0].user(); ck_abort_msg("User should not be set"); } catch (NotSet&) { } ck_assert(uri.get_authority_list()[0].host() == "host1"); try { uri.get_authority_list()[0].port(); ck_abort_msg("Port should not be set"); } catch (NotSet&) { } ck_assert(uri.get_authority_list()[1].host() == "host2"); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { gu::URI uri("tcp://host1:1234,host2:,host3:3456"); ck_assert(uri.get_authority_list().size() == 3); try { uri.get_authority_list()[0].user(); ck_abort_msg("User should not be set"); } catch (NotSet&) { } ck_assert(uri.get_authority_list()[0].host() == "host1"); ck_assert(uri.get_authority_list()[0].port() == "1234"); ck_assert(uri.get_authority_list()[1].host() == "host2"); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } } END_TEST START_TEST(uri_IPv6) { std::string const ip("[2001:db8:85a3::8a2e:370:7334]"); std::string const ip_unescaped("2001:db8:85a3::8a2e:370:7334"); std::string const port("789"); std::string const addr(ip + ':' + port); std::string const localhost("[::1]"); std::string const localhost_unescaped("::1"); std::string const default_unescaped("::"); std::string const invalid("[2001:db8:85a3::8a2e:370:7334[:789"); std::string const 
link_local_with_scheme("[fe80::fc87:f2ff:fe85:6ba6%lxdbr0]"); try { URI u(ip, false); ck_assert(u.get_host() == ip); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(ip_unescaped, false); ck_assert(u.get_host() == ip_unescaped); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(addr, false); ck_assert(u.get_host() == ip); ck_assert(u.get_port() == port); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(localhost, false); ck_assert(u.get_host() == localhost); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(localhost_unescaped, false); ck_assert(u.get_host() == localhost_unescaped); log_info << "host: " << u.get_host(); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(default_unescaped, false); ck_assert(u.get_host() == default_unescaped); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } try { URI u(invalid, false); ck_abort_msg("invalid uri accepted"); } catch (gu::Exception& e) { ck_assert(e.get_errno() == EINVAL); } try { URI u(link_local_with_scheme, false); ck_assert(u.get_host() == link_local_with_scheme); } catch (const gu::Exception& e) { ck_abort_msg("%s", e.what()); } } END_TEST Suite *gu_uri_suite(void) { Suite *s = suite_create("galerautils++ URI"); TCase *tc = tcase_create("URI"); suite_add_tcase (s, tc); tcase_add_test (tc, uri_test1); tcase_add_test (tc, uri_test2); tcase_add_test (tc, uri_test3); tcase_add_test (tc, uri_non_strict); tcase_add_test (tc, uri_test_multihost); tcase_add_test (tc, uri_IPv6); return s; } galera-4-26.4.25/galerautils/tests/gu_mmh3_test.h000644 000164 177776 00000000313 15107057155 022736 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_mmh3_test__ #define __gu_mmh3_test__ #include extern Suite *gu_mmh3_suite(void); #endif /* __gu_mmh3_test__ */ galera-4-26.4.25/galerautils/tests/gu_utils_test.c000644 000164 177776 00000006061 15107057155 023233 
0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2020 Codership Oy // $Id$ #include #include "gu_utils_test.h" #include "../src/gu_utils.h" #include #include #include START_TEST (gu_strconv_test) { long long llret; const char* strret; strret = gu_str2ll ("-1a", &llret); ck_assert(strret[0] == 'a'); ck_assert(-1 == llret); strret = gu_str2ll ("1K", &llret); ck_assert(strret[0] == '\0'); ck_assert((1 << 10) == llret); strret = gu_str2ll ("-1m", &llret); ck_assert(strret[0] == '\0'); ck_assert(-(1 << 20) == llret); strret = gu_str2ll ("354G0", &llret); ck_assert(strret[0] == '0'); ck_assert((354LL << 30) == llret); strret = gu_str2ll ("0m", &llret); ck_assert(strret[0] == '\0'); ck_assert(0 == llret); strret = gu_str2ll ("-999999999999999g", &llret); ck_assert(strret[0] == '\0'); ck_assert(LLONG_MIN == llret); bool b; strret = gu_str2bool ("-1a", &b); ck_assert(strret[0] == '-'); ck_assert(false == b); strret = gu_str2bool ("-1", &b); ck_assert(strret[0] == '-'); ck_assert(false == b); strret = gu_str2bool ("1a", &b); ck_assert(strret[0] == '1'); ck_assert(false == b); strret = gu_str2bool ("35", &b); ck_assert(strret[0] == '3'); ck_assert(false == b); strret = gu_str2bool ("0k", &b); ck_assert(strret[0] == '0'); ck_assert(false == b); strret = gu_str2bool ("1", &b); ck_assert(strret[0] == '\0'); ck_assert(true == b); strret = gu_str2bool ("0", &b); ck_assert(strret[0] == '\0'); ck_assert(false == b); strret = gu_str2bool ("Onn", &b); ck_assert(strret[0] == 'O'); ck_assert(false == b); strret = gu_str2bool ("oFf", &b); ck_assert(strret[0] == '\0'); ck_assert(false == b); strret = gu_str2bool ("offt", &b); ck_assert(strret[0] == 'o'); ck_assert(false == b); strret = gu_str2bool ("On", &b); ck_assert(strret[0] == '\0'); ck_assert(true == b); strret = gu_str2bool ("tru", &b); ck_assert(strret[0] == 't'); ck_assert(false == b); strret = gu_str2bool ("trUE", &b); ck_assert(strret[0] == '\0'); ck_assert(true == b); strret = gu_str2bool ("truEth", &b); 
ck_assert(strret[0] == 't'); ck_assert(false == b); strret = gu_str2bool (" fALsE", &b); ck_assert(strret[0] == ' '); ck_assert(false == b); strret = gu_str2bool ("fALsE", &b); ck_assert(strret[0] == '\0'); ck_assert(false == b); strret = gu_str2bool ("fALsEth", &b); ck_assert(strret[0] == 'f'); ck_assert(false == b); void* ptr; strret = gu_str2ptr ("-01234abc", &ptr); ck_assert(strret[0] == '\0'); ck_assert_msg(-0x1234abcLL == (intptr_t)ptr, "Expected %lld, got %" PRIdPTR, -0x1234abcLL, (intptr_t)ptr); } END_TEST Suite *gu_utils_suite(void) { Suite *s = suite_create("Galera misc utils functions"); TCase *tc = tcase_create("gu_utils"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_strconv_test); return s; } galera-4-26.4.25/galerautils/tests/gu_time_test.c000644 000164 177776 00000001627 15107057155 023034 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #include #include #include "gu_time_test.h" #include "../src/gu_time.h" START_TEST (gu_time_test) { struct timeval left = { 1, 900000 }; // 1.9 sec struct timeval right = { 5, 400000 }; // 5.4 sec double diff, tolerance = 1.0e-15; // double precision tolerance diff = gu_timeval_diff (&left, &right); ck_assert_msg(fabs(3.5 + diff) <= tolerance, "Expected %f, got %f, delta: %e", -3.5, diff, 3.5 + diff); diff = gu_timeval_diff (&right, &left); ck_assert_msg(fabs(3.5 - diff) <= tolerance, "Expected %f, got %f, delta: %e", 3.5, diff, 3.5 - diff); } END_TEST Suite *gu_time_suite(void) { Suite *s = suite_create("Galera time functions"); TCase *tc = tcase_create("gu_time"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_time_test); return s; } galera-4-26.4.25/galerautils/tests/gu_net_test.cpp000644 000164 177776 00000004004 15107057155 023214 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2020 Codership Oy #include #include #include #include #include #include #include #include #include "gu_logger.hpp" #include "gu_uri.hpp" #include "gu_resolver.hpp" #include "gu_lock.hpp" 
#include "gu_net_test.hpp" using std::vector; using std::string; using std::deque; using std::for_each; using namespace gu; using namespace gu::net; START_TEST(test_resolver) { std::string tcp_lh4("tcp://127.0.0.1:2002"); Addrinfo tcp_lh4_ai(resolve(tcp_lh4)); ck_assert(tcp_lh4_ai.get_family() == AF_INET); ck_assert(tcp_lh4_ai.get_socktype() == SOCK_STREAM); ck_assert_msg(tcp_lh4_ai.to_string() == tcp_lh4, "%s != %s", tcp_lh4_ai.to_string().c_str(), tcp_lh4.c_str()); std::string tcp_lh6("tcp://[::1]:2002"); Addrinfo tcp_lh6_ai(resolve(tcp_lh6)); ck_assert(tcp_lh6_ai.get_family() == AF_INET6); ck_assert(tcp_lh6_ai.get_socktype() == SOCK_STREAM); ck_assert_msg(tcp_lh6_ai.to_string() == tcp_lh6, "%s != %s", tcp_lh6_ai.to_string().c_str(), tcp_lh6.c_str()); std::string lh("tcp://localhost:2002"); Addrinfo lh_ai(resolve(lh)); ck_assert(lh_ai.to_string() == "tcp://127.0.0.1:2002" || lh_ai.to_string() == "tcp://[::1]:2002"); } END_TEST #if 0 /* bogus test, commenting out for now */ START_TEST(trac_288) { try { string url("tcp://do-not-resolve:0"); (void)resolve(url); } catch (Exception& e) { log_debug << "exception was " << e.what(); } } END_TEST #endif Suite* gu_net_suite() { Suite* s = suite_create("galerautils++ Networking"); TCase* tc; tc = tcase_create("test_resolver"); tcase_add_test(tc, test_resolver); tcase_set_timeout(tc, 30); suite_add_tcase(s, tc); #if 0 /* bogus test, commenting out for now */ tc = tcase_create("trac_288"); tcase_add_test(tc, trac_288); suite_add_tcase(s, tc); #endif return s; } galera-4-26.4.25/galerautils/tests/gu_thread_test.hpp000644 000164 177776 00000000311 15107057155 023677 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2016 Codership Oy // #ifndef GU_THREAD_TEST_HPP #define GU_THREAD_TEST_HPP #include extern Suite *gu_thread_suite(); #endif // GU_THREAD_TEST_HPP galera-4-26.4.25/galerautils/tests/gu_lock_step_test.c000644 000164 177776 00000007246 15107057155 024064 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 
2008-2020 Codership Oy * * $Id$ */ #include #include // usleep() #include // strerror() #include "../src/gu_log.h" #include "../src/gu_lock_step.h" #include "gu_lock_step_test.h" #define TEST_USLEEP 1000 // 1ms #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (TEST_USLEEP); }} gu_lock_step_t LS; static void* lock_step_thread (void* arg) { gu_lock_step_wait (&LS); return NULL; } START_TEST (gu_lock_step_test) { const long timeout = 500; // 500 ms long ret; gu_thread_t thr1, thr2; gu_lock_step_init (&LS); ck_assert(LS.wait == 0); ck_assert(LS.enabled == false); // first try with lock-stepping disabled ret = gu_thread_create (&thr1, NULL, lock_step_thread, NULL); ck_assert(ret == 0); WAIT_FOR(0 == LS.wait); // 10ms ck_assert(LS.wait == 0); // by default lock-step is disabled ret = gu_thread_join (thr1, NULL); ck_assert_msg(ret == 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); ck_assert(-1 == ret); // enable lock-step gu_lock_step_enable (&LS, true); ck_assert(LS.enabled == true); ret = gu_lock_step_cont (&LS, timeout); ck_assert(0 == ret); // nobody's waiting ret = gu_thread_create (&thr1, NULL, lock_step_thread, NULL); ck_assert(ret == 0); WAIT_FOR(1 == LS.wait); // 10ms ck_assert(LS.wait == 1); ret = gu_thread_create (&thr2, NULL, lock_step_thread, NULL); ck_assert(ret == 0); WAIT_FOR(2 == LS.wait); // 10ms ck_assert(LS.wait == 2); ret = gu_lock_step_cont (&LS, timeout); ck_assert(ret == 2); // there were 2 waiters ck_assert(LS.wait == 1); // 1 waiter remains ret = gu_lock_step_cont (&LS, timeout); ck_assert(ret == 1); ck_assert(LS.wait == 0); // 0 waiters remain ret = gu_thread_join (thr1, NULL); ck_assert_msg(ret == 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_thread_join (thr2, NULL); ck_assert_msg(ret == 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); ck_assert(ret == 0); // there were 0 waiters 
ck_assert_msg(LS.wait == 0, "Expected LS.wait to be 0, found: %ld", LS.wait); gu_lock_step_destroy (&LS); } END_TEST #define RACE_ITERATIONS 1000 static void* lock_step_race (void* arg) { long i; for (i = 0; i < RACE_ITERATIONS; i++) gu_lock_step_wait (&LS); return NULL; } START_TEST (gu_lock_step_race) { const long timeout = 500; // 500 ms long ret, i; gu_thread_t thr1; gu_lock_step_init (&LS); gu_lock_step_enable (&LS, true); ck_assert(LS.enabled == true); ret = gu_thread_create (&thr1, NULL, lock_step_race, NULL); ck_assert(ret == 0); for (i = 0; i < RACE_ITERATIONS; i++) { ret = gu_lock_step_cont (&LS, timeout); ck_assert_msg(ret == 1, "No waiter at iteration: %ld", i); } ck_assert(LS.wait == 0); // 0 waiters remain ret = gu_thread_join (thr1, NULL); ck_assert_msg(ret == 0, "gu_thread_join() failed: %ld (%s)", ret, strerror(ret)); ret = gu_lock_step_cont (&LS, timeout); ck_assert(ret == 0); } END_TEST Suite *gu_lock_step_suite(void) { Suite *suite = suite_create("Galera LOCK_STEP utils"); TCase *tcase = tcase_create("gu_lock_step"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gu_lock_step_test); tcase_add_test (tcase, gu_lock_step_race); return suite; } galera-4-26.4.25/galerautils/tests/gu_atomic_test.cpp000644 000164 177776 00000010455 15107057155 023711 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014-2020 Codership Oy */ #include "../src/gu_atomic.hpp" #include "gu_atomic_test.hpp" #include "gu_limits.h" #include START_TEST(test_sanity_c) { int64_t i, j, k; i = 1; j = 0; k = 3; gu_atomic_set(&i, &j); ck_assert(j == 0); ck_assert(i == 0); gu_atomic_get(&i, &k); ck_assert(i == 0); ck_assert(k == 0); j = gu_atomic_fetch_and_add (&i, 7); ck_assert(j == 0); ck_assert(i == 7); j = gu_atomic_fetch_and_sub (&i, 10); ck_assert(j == 7); ck_assert(i == -3); j = gu_atomic_fetch_and_or (&i, 15); ck_assert(j == -3); ck_assert(i == -1); j = gu_atomic_fetch_and_and (&i, 5); ck_assert(j == -1); ck_assert(i == 5); j = gu_atomic_fetch_and_xor (&i, 3); 
ck_assert(j == 5); ck_assert(i == 6); j = gu_atomic_fetch_and_nand(&i, 15); ck_assert(j == 6); ck_assert(i == -7); j = gu_atomic_add_and_fetch (&i, 7); ck_assert(j == 0); ck_assert(i == 0); j = gu_atomic_sub_and_fetch (&i, -2); ck_assert(j == 2); ck_assert(i == 2); j = gu_atomic_or_and_fetch (&i, 5); ck_assert(j == 7); ck_assert(i == 7); j = gu_atomic_and_and_fetch (&i, 13); ck_assert(j == 5); ck_assert(i == 5); j = gu_atomic_xor_and_fetch (&i, 15); ck_assert(j == 10); ck_assert(i == 10); j = gu_atomic_nand_and_fetch(&i, 7); ck_assert(j == -3); ck_assert(i == -3); } END_TEST START_TEST(test_sanity_cxx) { gu::Atomic i(1); int64_t const k(3); ck_assert(i() == 1); ck_assert(i() != k); ck_assert((i = k) == k); ck_assert(i() == k); ck_assert(i.fetch_and_zero() == k); ck_assert(i() == 0); ck_assert(i.fetch_and_add(5) == 0); ck_assert(i() == 5); ck_assert(i.add_and_fetch(3) == 8); ck_assert(i() == 8); ck_assert((++i)() == 9); ck_assert(i() == 9); ck_assert((--i)() == 8); ck_assert(i() == 8); i += 3; ck_assert(i() == 11); } END_TEST // we want it sufficiently long to test above least 4 bytes, but sufficiently // short to avoid overflow static long long const increment(333333333333LL); // number of add/sub thread pairs static int const n_threads(8); // maximum iterations number (to guarantee no overflow) static int const max_iter(GU_LLONG_MAX/increment/n_threads); // number of iterations capped at 1M, just in case static int const iterations(max_iter > 1000000 ? 
1000000 : max_iter); static void* add_loop(void* arg) { int64_t* const var(static_cast(arg)); for (int i(iterations); --i;) { gu_atomic_fetch_and_add(var, increment); } return NULL; } static void* sub_loop(void* arg) { int64_t* const var(static_cast(arg)); for (int i(iterations); --i;) { gu_atomic_fetch_and_sub(var, increment); } return NULL; } static int start_threads(pthread_t* threads, int64_t* var) { for (int i(0); i < n_threads; ++i) { pthread_t* const add_thr(&threads[i * 2]); pthread_t* const sub_thr(add_thr + 1); int const add_err(pthread_create(add_thr, NULL, add_loop, var)); int const sub_err(pthread_create(sub_thr, NULL, sub_loop, var)); if (add_err != 0) return add_err; if (sub_err != 0) return sub_err; } return 0; } static int join_threads(pthread_t* threads) { for (int i(0); i < n_threads; ++i) { pthread_t* const add_thr(&threads[i * 2]); pthread_t* const sub_thr(add_thr + 1); int const add_err(pthread_join(*add_thr, NULL)); int const sub_err(pthread_join(*sub_thr, NULL)); if (add_err != 0) return add_err; if (sub_err != 0) return sub_err; } return 0; } // This may not catch concurrency problems every time. But sometimes it should // (if there are any). 
START_TEST(test_concurrency) { ck_assert(iterations >= 1000000); int64_t var(0); pthread_t threads[n_threads * 2]; ck_assert(0 == start_threads(threads, &var)); ck_assert(0 == join_threads(threads)); ck_assert(0 == var); } END_TEST Suite* gu_atomic_suite() { TCase* t1 = tcase_create ("sanity"); tcase_add_test (t1, test_sanity_c); tcase_add_test (t1, test_sanity_cxx); TCase* t2 = tcase_create ("concurrency"); tcase_add_test (t2, test_concurrency); tcase_set_timeout(t2, 60); Suite* s = suite_create ("gu::Atomic"); suite_add_tcase (s, t1); suite_add_tcase (s, t2); return s; } galera-4-26.4.25/galerautils/tests/gu_vector_test.hpp000644 000164 177776 00000000331 15107057155 023734 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_vector_test__ #define __gu_vector_test__ #include extern Suite *gu_vector_suite(void); #endif /* __gu_vector_test__ */ galera-4-26.4.25/galerautils/tests/gu_spooky_test.h000644 000164 177776 00000000323 15107057155 023417 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_spooky_test__ #define __gu_spooky_test__ #include extern Suite *gu_spooky_suite(void); #endif /* __gu_spooky_test__ */ galera-4-26.4.25/galerautils/tests/gu_hash_test.c000644 000164 177776 00000020013 15107057155 023007 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy /* * This unit test is mostly to check that Galera hash definitions didn't change: * correctness of hash algorithms definitions is checked in respective unit * tests. * * By convention checks are made against etalon byte arrays, so integers must be * converted to little-endian. * * $Id$ */ #include "gu_hash_test.h" #include "../src/gu_hash.h" #include "../src/gu_log.h" #include "../src/gu_hexdump.h" /* checks equivalence of two buffers, returns true if check fails and logs * buffer contents. 
*/ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { ssize_t str_size = size * 2.2 + 1; char c[str_size], r[str_size]; gu_hexdump (exp, size, c, sizeof(c), false); gu_hexdump (got, size, r, sizeof(r), false); gu_info ("expected hash value:\n%s\nfound:\n%s\n", c, r); return true; } return false; } static const char test_msg[2048] = { 0, }; #define GU_HASH_TEST_LENGTH 43 /* some random prime */ static const uint8_t gu_hash128_check[16] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84,0x73,0x41,0x3F,0xA5,0xEB,0x27,0x40,0x2F }; static const uint8_t gu_hash64_check[8] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84 }; static const uint8_t gu_hash32_check[4] = { 0xFA,0x2C,0x78,0x67 }; /* Tests partial hashing functions */ START_TEST (gu_hash_test) { gu_hash_t h; gu_hash_init(&h); gu_hash_append(&h, test_msg, GU_HASH_TEST_LENGTH); uint8_t res128[16]; gu_hash_get128 (&h, res128); ck_assert_msg(!check(gu_hash128_check, res128, sizeof(res128)), "gu_hash_get128() failed."); uint64_t res64 = gu_hash_get64(&h); ck_assert(gu_hash64(test_msg, GU_HASH_TEST_LENGTH) == res64); res64 = gu_le64(res64); ck_assert_msg(!check(gu_hash64_check, &res64, sizeof(res64)), "gu_hash_get64() failed."); uint32_t res32 = gu_hash_get32(&h); ck_assert(gu_hash32(test_msg, GU_HASH_TEST_LENGTH) == res32); res32 = gu_le32(res32); ck_assert_msg(!check(gu_hash32_check, &res32, sizeof(res32)), "gu_hash_get32() failed."); } END_TEST static const uint8_t fast_hash128_check0 [16] = { 0xA9,0xCE,0x5A,0x56,0x0C,0x0B,0xF7,0xD6,0x63,0x4F,0x6F,0x81,0x0E,0x0B,0xF2,0x0A }; static const uint8_t fast_hash128_check511 [16] = { 0xC6,0x7F,0x4C,0xE7,0x6F,0xE0,0xDA,0x14,0xCC,0x9F,0x21,0x76,0xAF,0xB5,0x12,0x1A }; static const uint8_t fast_hash128_check512 [16] = { 0x38,0x8D,0x2B,0x90,0xC8,0x7F,0x11,0x53,0x3F,0xB4,0x32,0xC1,0xD7,0x2B,0x04,0x39 }; static const uint8_t fast_hash128_check2011[16] = { 
0xB7,0xCE,0x75,0xC7,0xB4,0x31,0xBC,0xC8,0x95,0xB3,0x41,0xB8,0x5B,0x8E,0x77,0xF9 }; static const uint8_t fast_hash64_check0 [8] = { 0x6C, 0x55, 0xB8, 0xA1, 0x02, 0xC6, 0x21, 0xCA }; static const uint8_t fast_hash64_check15 [8] = { 0x28, 0x49, 0xE8, 0x34, 0x7A, 0xAB, 0x49, 0x34 }; static const uint8_t fast_hash64_check16 [8] = { 0x44, 0x40, 0x2C, 0x82, 0xD3, 0x8D, 0xAA, 0xFE }; static const uint8_t fast_hash64_check511 [8] = { 0xC6, 0x7F, 0x4C, 0xE7, 0x6F, 0xE0, 0xDA, 0x14 }; static const uint8_t fast_hash64_check512 [8] = { 0x38, 0x8D, 0x2B, 0x90, 0xC8, 0x7F, 0x11, 0x53 }; static const uint8_t fast_hash64_check2011[8] = { 0xB7, 0xCE, 0x75, 0xC7, 0xB4, 0x31, 0xBC, 0xC8 }; static const uint8_t fast_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t fast_hash32_check31 [4] = { 0x1E, 0xFF, 0x48, 0x38 }; static const uint8_t fast_hash32_check32 [4] = { 0x63, 0xC2, 0x53, 0x0D }; static const uint8_t fast_hash32_check511 [4] = { 0xC6, 0x7F, 0x4C, 0xE7 }; static const uint8_t fast_hash32_check512 [4] = { 0x38, 0x8D, 0x2B, 0x90 }; static const uint8_t fast_hash32_check2011[4] = { 0xB7, 0xCE, 0x75, 0xC7 }; /* Tests fast hash functions */ START_TEST (gu_fast_hash_test) { uint8_t res128[16]; gu_fast_hash128 (test_msg, 0, res128); ck_assert(!check(fast_hash128_check0, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 511, res128); ck_assert(!check(fast_hash128_check511, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 512, res128); ck_assert(!check(fast_hash128_check512, res128, sizeof(res128))); gu_fast_hash128 (test_msg, 2011, res128); ck_assert(!check(fast_hash128_check2011, res128, sizeof(res128))); uint64_t res64; res64 = gu_fast_hash64 (test_msg, 0); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check0, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 15); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check15, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 16); res64 = gu_le64(res64); 
ck_assert(!check(fast_hash64_check16, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 511); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check511, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 512); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check512, &res64, sizeof(res64))); res64 = gu_fast_hash64 (test_msg, 2011); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check2011, &res64, sizeof(res64))); uint32_t res32; res32 = gu_fast_hash32 (test_msg, 0); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check0, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 31); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check31, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 32); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check32, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 511); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check511, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 512); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check512, &res32, sizeof(res32))); res32 = gu_fast_hash32 (test_msg, 2011); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check2011, &res32, sizeof(res32))); } END_TEST /* Tests table hash functions: * - for 64-bit platforms table hash should be identical to fast 64-bit hash, * - for 32-bit platforms table hash is different. 
*/ #if GU_WORDSIZE == 64 START_TEST (gu_table_hash_test) { size_t res; ck_assert(sizeof(res) <= 8); res = gu_table_hash (test_msg, 0); res = gu_le64(res); ck_assert(!check(fast_hash64_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 15); res = gu_le64(res); ck_assert(!check(fast_hash64_check15, &res, sizeof(res))); res = gu_table_hash (test_msg, 16); res = gu_le64(res); ck_assert(!check(fast_hash64_check16, &res, sizeof(res))); res = gu_table_hash (test_msg, 511); res = gu_le64(res); ck_assert(!check(fast_hash64_check511, &res, sizeof(res))); res = gu_table_hash (test_msg, 512); res = gu_le64(res); ck_assert(!check(fast_hash64_check512, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le64(res); ck_assert(!check(fast_hash64_check2011, &res, sizeof(res))); } END_TEST #elif GU_WORDSIZE == 32 static const uint8_t table_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t table_hash32_check32 [4] = { 0x65, 0x16, 0x17, 0x42 }; static const uint8_t table_hash32_check2011[4] = { 0xF9, 0xBC, 0xEF, 0x7A }; START_TEST (gu_table_hash_test) { size_t res; ck_assert(sizeof(res) <= 4); res = gu_table_hash (test_msg, 0); res = gu_le32(res); ck_assert(!check(table_hash32_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 32); res = gu_le32(res); ck_assert(!check(table_hash32_check32, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le32(res); ck_assert(!check(table_hash32_check2011, &res, sizeof(res))); } END_TEST #else /* GU_WORDSIZE == 32 */ # error "Unsupported word size" #endif Suite *gu_hash_suite(void) { Suite *s = suite_create("Galera hash"); TCase *tc = tcase_create("gu_hash"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_hash_test); tcase_add_test (tc, gu_fast_hash_test); tcase_add_test (tc, gu_table_hash_test); return s; } galera-4-26.4.25/galerautils/tests/SConscript000644 000164 177776 00000007745 15107057155 022221 0ustar00jenkinsnogroup000000 000000 import os Import('check_env', 
'crc32c_cppflags') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' # #/galerautils/src #/common ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) gu_tests_srcs = Split(''' gu_tests.c gu_mem_test.c gu_bswap_test.c gu_fnv_test.c gu_mmh3_test.c gu_spooky_test.c gu_hash_test.c gu_time_test.c gu_fifo_test.c gu_uuid_test.c gu_dbug_test.c gu_lock_step_test.c gu_str_test.c gu_utils_test.c ''') gu_tests_objs = env.SharedObject(gu_tests_srcs) # Need to specify dedicated preprocessor macro for CRC32C unit test only crc32c_env = env.Clone() crc32c_env.Append(CPPFLAGS = crc32c_cppflags) crc32c_objs = crc32c_env.SharedObject(['gu_crc32c_test.c']) # Path to certificates used in SSL tests. asio_test_cert_dir = '\\"' + Dir('#').abspath + '/tests/conf' + '\\"' env.Append(CPPFLAGS = ' -DGU_ASIO_TEST_CERT_DIR=' + asio_test_cert_dir) gu_tests = env.Program(target = 'gu_tests', source = gu_tests_objs + crc32c_objs) env.Test("gu_tests.passed", gu_tests) env.Alias("test", "gu_tests.passed") Clean(gu_tests, '#/gu_tests.log') gu_testspp = env.Program(target = 'gu_tests++', source = Split(''' gu_atomic_test.cpp gu_vector_test.cpp gu_string_test.cpp gu_vlq_test.cpp gu_digest_test.cpp gu_mem_pool_test.cpp gu_alloc_test.cpp gu_rset_test.cpp gu_string_utils_test.cpp gu_uri_test.cpp gu_gtid_test.cpp gu_config_test.cpp gu_net_test.cpp gu_datetime_test.cpp gu_histogram_test.cpp gu_stats_test.cpp gu_thread_test.cpp gu_shared_ptr_test.cpp gu_asio_test.cpp gu_deqmap_test.cpp gu_progress_test.cpp gu_utils_test++.cpp gu_tests++.cpp ''')) env.Test("gu_tests++.passed", gu_testspp) env.Alias("test", "gu_tests++.passed") Clean(gu_testspp, '#/gu_tests++.log') gu_to_test = env.Program(target = 'gu_to_test', source = Split(''' gu_to_test.c ''')) avalanche = env.Program(target = 'avalanche', source = Split(''' avalanche.c ''')) Import('all_tests') if all_tests: env.Test("gu_to_test.passed", gu_to_test) 
env.Test("avalanche.passed", avalanche) # copy_vs_assignment = env.Program(target = 'copy_vs_assignment', # source = Split(''' # copy_vs_assignment.cpp # ''')) deqmap_bench = env.Program(target = 'deqmap_bench', source = Split(''' deqmap_bench.cpp ''')) crc32c_bench = crc32c_env.Program(target = 'crc32c_bench', source = Split(''' crc32c_bench.cpp ''')) galera-4-26.4.25/galerautils/tests/gu_deqmap_test.cpp000644 000164 177776 00000061464 15107057155 023712 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2020 Codership Oy #define GU_DEQMAP_CONSISTENCY_CHECKS 1 #include "../src/gu_deqmap.hpp" #include "gu_deqmap_test.hpp" #include // rand() class Test { public: Test() : val_(-1) {} // Null object explicit Test(int v) : val_(v) {} bool operator ==(const Test& other) const { return val_ == other.val_; } bool operator !=(const Test& other) const { return !operator==(other); } int operator +() const { return val_; } private: int val_; }; static std::ostream& operator <<(std::ostream& os, const Test& t) { os << +t; return os; } START_TEST(ctor_clear) { typedef gu::DeqMap Map; Map m(-1); ck_assert(m.size() <= 0); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(-1)); ck_assert(m.upper_bound(0) == m.index_end()); m.clear(5); ck_assert(m.size() <= 0); ck_assert(m.index_begin() == Map::index_type(5)); ck_assert(m.index_end() == Map::index_type(5)); ck_assert(m.upper_bound(0) == m.index_begin()); } END_TEST START_TEST(push_pop) { typedef gu::DeqMap Map; Map m(-1); /* some push acton */ m.push_back(1); // -1 m.push_back(2); // 0 m.push_back(3); // 1 m.push_front(4);// -2 /* Here we have: 4, 1, 2, 3 */ ck_assert(m.size() == 4); ck_assert(m.front() == 4); ck_assert(m.back() == 3); ck_assert(m.index_begin() == Map::index_type(-2)); ck_assert(m.index_end() == Map::index_type(2)); m.pop_front(); /* Here we have: 1, 2, 3 */ ck_assert(m.size() == 3); ck_assert(m.front() == 1); ck_assert(m.back() == 3); ck_assert(m.index_begin() == 
Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(2)); m.pop_back(); /* Here we have: 1, 2 */ ck_assert(m.size() == 2); ck_assert(m.front() == 1); ck_assert(m.back() == 2); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(1)); m.pop_front(); m.pop_front(); /* Here we have: empty */ ck_assert(m.size() == 0); ck_assert(m.index_begin() == Map::index_type(1)); ck_assert(m.index_end() == Map::index_type(1)); m.push_front(7); // 0 ck_assert(m.size() == 1); ck_assert(m.front() == 7); ck_assert(m.back() == 7); ck_assert(m.index_begin() == Map::index_type(0)); ck_assert(m.index_end() == Map::index_type(1)); m.pop_back(); ck_assert(m.size() == 0); ck_assert(m.index_begin() == Map::index_type(0)); ck_assert(m.index_end() == Map::index_type(0)); } END_TEST START_TEST(pop_holes) /* autoshrinking when popping on container with holes */ { typedef gu::DeqMap Map; Map m(-1); ck_assert(m.size() == 0); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(-1)); ck_assert(m.index_back() == Map::index_type(-2)); m.insert(1, 1); m.insert(4, 4); ck_assert(m.size() == 4); ck_assert(m.index_begin() == Map::index_type(1)); ck_assert(m.index_end() == Map::index_type(5)); ck_assert(m.index_back() == Map::index_type(4)); ck_assert(Map::not_set(m[2])); ck_assert(Map::not_set(m[3])); m.pop_front(); ck_assert(m.size() == 1); ck_assert(m.index_begin() == Map::index_type(4)); ck_assert(m.index_end() == Map::index_type(5)); ck_assert(m.index_back() == Map::index_type(4)); ck_assert(*m.begin() == 4); m.insert(1, 1); ck_assert(m.size() == 4); ck_assert(m.index_begin() == Map::index_type(1)); ck_assert(m.index_end() == Map::index_type(5)); ck_assert(m.index_back() == Map::index_type(4)); ck_assert(Map::not_set(m[2])); ck_assert(Map::not_set(m[3])); m.pop_back(); ck_assert(m.size() == 1); ck_assert(m.index_begin() == Map::index_type(1)); ck_assert(m.index_end() == Map::index_type(2)); 
ck_assert(m.index_back() == Map::index_type(1)); ck_assert(*m.begin() == 1); } END_TEST START_TEST(at) { typedef gu::DeqMap Map; Map m(-1); try { m.at(-1); ck_abort_msg("expected exception"); } catch (gu::NotFound&) {} m.push_back(3); try { ck_assert(3 == m.at(-1)); } catch (...) { ck_abort_msg("unexpected exception"); } try { m.at(-2); ck_abort_msg("expected exception"); } catch (gu::NotFound&) {} try { m.at(0); ck_abort_msg("expected exception"); } catch (gu::NotFound&) {} } END_TEST START_TEST(iterators_insert) { typedef gu::DeqMap Map; Map m(-1); m.insert(m.begin(), 4, 4); /* here we have 4, 4, 4, 4 */ ck_assert(m.size() == 4); ck_assert(*m.begin() == 4); ck_assert(*m.rbegin() == 4); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(3)); m.insert(m.begin() + 1, 2, 2); // bulk insert (overwrite) in the middle /* here we have 4, 2, 2, 4 */ ck_assert(m.size() == 4); ck_assert(*m.begin() == 4); ck_assert(*m.rbegin() == 4); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(3)); m.insert(m.begin(), 2, 1); // bulk insert (overwrite) in the beginning /* here we have 1, 1, 2, 4 */ ck_assert(m.size() == 4); ck_assert(*m.begin() == 1); ck_assert(*m.rbegin() == 4); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(3)); m.insert(m.end(), 2, 5); // bulk insert in the end /* here we have 1, 1, 2, 4, 5, 5 */ ck_assert(m.size() == 6); ck_assert(*m.begin() == 1); ck_assert(*m.rbegin() == 5); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(5)); m.insert(m.begin(), -1); // single insert (overwrite) in the beginning /* here we have -1, 1, 2, 4, 5, 5 */ ck_assert(m.size() == 6); ck_assert(*m.begin() == -1); ck_assert(*m.rbegin() == 5); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(5)); Map::iterator b(m.begin()); ++b; ck_assert(!Map::not_set(*b)); ck_assert(*b 
== 1); ck_assert(m.index(b) == 0); ++b; ck_assert(*b == 2); ck_assert(m.index(b) == 1); m.insert(b, 1); // single insert (overwrite) in the middle /* here we have -1, 1, 1, 4, 5, 5 */ ck_assert(m.size() == 6); ck_assert(*b == 1); m.push_back(6); // single insert in the end /* here we have -1, 1, 1, 4, 5, 5, 6 */ ck_assert(m.size() == 7); ck_assert(*m.begin() == -1); ck_assert(*m.rbegin() == 6); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == Map::index_type(6)); ck_assert(m.index_back() == Map::index_type(5)); ck_assert(m[m.index_back()] == 6); b = m.begin() + 3; ck_assert(*b == 4); ck_assert(m.index(b) == 2); *b = 2; // assignment via iterator /* here we have -1, 1, 1, 2, 5, 5, 6 */ ck_assert(m.size() == 7); ck_assert(*b == 2); Map::reverse_iterator rb(m.rbegin()); ck_assert(*rb == 6); ck_assert(m.index(rb) == 5); *rb = 5; /* here we have -1, 1, 1, 2, 5, 5, 5 */ ck_assert(*rb == 5); ++rb; ck_assert(*rb == 5); ck_assert(m.index(rb) == 4); *rb = 4; /* here we have -1, 1, 1, 2, 5, 4, 5 */ ck_assert(*rb == 4); } END_TEST START_TEST(iterators_erase) { typedef gu::DeqMap Map; Map m(-1); Map::size_type init_size(12); m.insert(m.begin(), init_size, 1); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); Map::iterator b(m.begin() + 2); m.erase(b); // single erase in the middle /* here we have 1, 1, N, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ /* b */ ck_assert(Map::not_set(*b)); Map::iterator e(m.end() - 4); m.erase(e, e + 2); // bulk erase in the middle /* here we have 1, 1, N, 1, 1, 1, 1, 1, N, N, 1, 1 */ /* b e */ ck_assert(Map::not_set(*e)); ck_assert(m.size() == init_size); ck_assert(m.index_begin() == Map::index_type(-1)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); 
m.erase(m.begin()); // single erase at the front /* here we have 1, N, 1, 1, 1, 1, 1, N, N, 1, 1 */ /* b e */ ck_assert(m.size() == init_size - 1); ck_assert(m.index_begin() == Map::index_type(0)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.erase(m.begin()); // single erase at the front before hole /* here we have 1, 1, 1, 1, 1, N, N, 1, 1 */ /* b e */ ck_assert(m.size() == init_size - 3); ck_assert(m.index_begin() == Map::index_type(2)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.erase(m.end() - 1); // single erase at the back /* here we have 1, 1, 1, 1, 1, N, N, 1 */ /* b e */ ck_assert(m.size() == init_size - 4); ck_assert(m.index_begin() == Map::index_type(2)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.erase(m.end() - 1); // single erase at the back before hole /* here we have 1, 1, 1, 1, 1 */ /* b e */ ck_assert(m.size() == init_size - 7); ck_assert(m.index_begin() == Map::index_type(2)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.erase(m.begin(), m.begin() + 2); // bulk erase at the front /* here we have 1, 1, 1 */ /* b e */ ck_assert(m.size() == init_size - 9); ck_assert(m.index_begin() == Map::index_type(4)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.erase(m.end() - 2, m.end()); // bulk erase at the end /* here we have 1 */ /* b e */ ck_assert(m.size() == init_size - 11); ck_assert(m.index_begin() == Map::index_type(4)); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.front() == 1); ck_assert(m.back() == 1); m.insert(m.end(), 16, 1); init_size = m.size(); ck_assert (init_size == 17); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ m.erase(m.begin() + 
1, m.begin() + 3); /* here we have 1, N, N, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size); m.erase(m.begin() + 2, m.begin() + 4); /* here we have 1, N, N, N, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size); m.erase(m.begin(), m.begin() + 2); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size - 4); ck_assert(m.index_begin() == Map::index_type(8)); m.erase(m.end() - 3, m.end() - 1); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, N, N, 1 */ ck_assert(m.size() == init_size - 4); m.erase(m.end() - 4, m.end() - 1); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1, N, N, N, 1 */ ck_assert(m.size() == init_size - 4); m.erase(m.end() - 2, m.end()); /* here we have 1, 1, 1, 1, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size - 8); m.erase(m.begin() + 2, m.begin() + 4); /* here we have 1, 1, N, N, 1, 1, 1, 1, 1 */ ck_assert(m.size() == init_size - 8); m.erase(m.begin() + 5, m.begin() + 7); /* here we have 1, 1, N, N, 1, N, N, 1, 1 */ ck_assert(m.size() == init_size - 8); ck_assert(m.index_begin() == Map::index_type(8)); ck_assert(Map::not_set(m[m.index_begin() + 2])); ck_assert(Map::not_set(m[m.index_begin() + 3])); ck_assert(!Map::not_set(m[m.index_begin() + 4])); ck_assert(Map::not_set(m[m.index_begin() + 5])); ck_assert(Map::not_set(m[m.index_begin() + 6])); m.erase(m.begin() + 1, m.begin() + 8); /* here we have 1, N, N, N, N, N, N, N, 1 */ ck_assert(m.size() == init_size - 8); m.erase(m.begin()); /* here we have 1 */ ck_assert(m.size() == 1); ck_assert(m.index_begin() == Map::index_type(16)); } END_TEST /* Tests attempts to insert Null values to container of size SIZE. 
Two cases * for every insert method: beginning (equivalent to middle) and end */ static void null_insertions(size_t const SIZE) { typedef gu::DeqMap Map; Map::value_type const Null(Map::null_value()); ck_assert(Map::null_value() == Null); ck_assert(Map::not_set(Null)); Map::value_type const Default = Map::value_type(0); ck_assert(Null != Default); ck_assert(!Map::not_set(Default)); Map::index_type const Begin(-1); Map m(Begin); m.insert(m.end(), SIZE, Default); ck_assert(m.size() == SIZE); ck_assert(m.index_begin() == Begin); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert((m.size() == 0 || m.front() == Default)); ck_assert((m.size() == 0 || m.back() == Default)); try { m.push_front(Null); ck_abort_msg("No exception in push_front()"); } catch (std::invalid_argument& e) { try { m.push_back(Null); ck_abort_msg("No exception in push_back()"); } catch (std::invalid_argument& e) { ck_assert(m.size() == SIZE); ck_assert(m.index_begin() == Begin); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.size() == 0 || m.front() == Default); ck_assert(m.size() == 0 || m.back() == Default); } catch (...) { ck_abort_msg("Unexpected exception in push_back()"); } } catch (...) { ck_abort_msg("Unexpected exception in push_front()"); } try { m.insert(m.begin(), Null); ck_abort_msg("No exception in insert() at the begin()"); } catch (std::invalid_argument& e) { try { m.insert(m.end(), Null); ck_abort_msg("No exception in insert() at the end()"); } catch (std::invalid_argument& e) { ck_assert(m.size() == SIZE); ck_assert(m.index_begin() == Begin); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.size() == 0 || m.front() == Default); ck_assert(m.size() == 0 || m.back() == Default); } catch (...) { ck_abort_msg("Unexpected exception in insert() at the end()"); } } catch (...) 
{ ck_abort_msg("Unexpected exception in insert() at the begin()"); } try { m.insert(m.begin(), 3, Null); ck_abort_msg("No exception in insert() at the begin()"); } catch (std::invalid_argument& e) { try { m.insert(m.end(), 3, Null); ck_abort_msg("No exception in insert() at the end()"); } catch (std::invalid_argument& e) { ck_assert(m.size() == SIZE); ck_assert(m.index_begin() == Begin); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.size() == 0 || m.front() == Default); ck_assert(m.size() == 0 || m.back() == Default); } catch (...) { ck_abort_msg("Unexpected exception in insert() at the end()"); } } catch (...) { ck_abort_msg("Unexpected exception in insert() at the begin()"); } try { m.insert(m.index_begin(), Null); ck_abort_msg("No exception in insert() at the begin()"); } catch (std::invalid_argument& e) { try { m.insert(m.index_end(), Null); ck_abort_msg("No exception in insert() at the end()"); } catch (std::invalid_argument& e) { ck_assert(m.size() == SIZE); ck_assert(m.index_begin() == Begin); ck_assert(m.index_end() == m.index_begin() + int(m.size())); ck_assert(m.size() == 0 || m.front() == Default); ck_assert(m.size() == 0 || m.back() == Default); } catch (...) { ck_abort_msg("Unexpected exception in insert() at the end()"); } } catch (...) 
{ ck_abort_msg("Unexpected exception in insert() at the begin()"); } } START_TEST(null_insertions_0) /* tests null insertions to empty container */ { null_insertions(0); } END_TEST START_TEST(null_insertions_1) /* tests null insertions to non empty container */ { null_insertions(1); } END_TEST START_TEST(random_access) { typedef gu::DeqMap Map; Map::index_type const Min(-5); Map::index_type const Max(5); Map m(100); m.insert(Min, Test(Min)); m.insert(Max, Test(Max)); ck_assert(m.size() == size_t(Max - Min + 1)); ck_assert(m.index_begin() == Min); ck_assert(m.index_back() == Max); ck_assert(m.front() == Test(Min)); ck_assert(m.back() == Test(Max)); for (Map::index_type i(Min + 1); i < Max; ++i) { ck_assert(Map::not_set(m[i])); } for (Map::index_type i(Min + 1); i < Max; ++i) { Map::value_type const val((Test(i))); if (!Map::not_set(val)) m.insert(i, val); } for (Map::index_type i(Min); i <= Max; ++i) { Map::value_type const val((Test(i))); ck_assert(m[i] == val); if (!Map::not_set(val)) ck_assert(m.at(i) == val); } } END_TEST START_TEST(find) { typedef gu::DeqMap Map; Map m(0); m.insert(1, 1); m.insert(2, 2); m.insert(3, 3); ck_assert(m.size() == 3); ck_assert(m[1] == 1); ck_assert(m[3] == 3); for(Map::index_type i(m.index_begin()); i < m.index_end(); ++i) { ck_assert(*m.find(i) == i); *m.find(i) = i + 1; ck_assert(*m.find(i) == i + 1); } const Map& mc(m); // test const overload for(Map::index_type i(mc.index_begin()); i < mc.index_end(); ++i) ck_assert(*mc.find(i) == i + 1); } END_TEST START_TEST(random_test) { /* access methods */ typedef enum { ITERATOR, INDEX, PUSHPOP } how_t; typedef gu::DeqMap Map; class map_state { Map::size_type const size_; Map::index_type const index_begin_; Map::index_type const index_end_; Map::const_iterator const begin_; Map::const_iterator const end_; public: map_state(const Map& m) : size_ (m.size()), index_begin_(m.index_begin()), index_end_ (m.index_end()), begin_ (m.begin()), end_ (m.end()) {} bool operator==(const map_state& o) 
const { return size_ == o.size_ && index_begin_ == o.index_begin_ && index_end_ == o.index_end_ && begin_ == o.begin_ && end_ == o.end_; } bool operator!=(const map_state& o) const { return !operator==(o); } }; static int const SIZE(1<<13); // 8K static int const SEED(2); Map map(0); srand(SEED); /* Insert size elements into the map */ for (int i(0); i < SIZE; ++i) { int const val(rand()); int const idx(val % SIZE); int const begin(map.index_begin()); int const end(map.index_end()); bool size_change; how_t how; if (idx == end) { /* elements at the end can be inserted anyhow */ how = how_t(rand() % (PUSHPOP + 1)); size_change = true; } else if (begin <= idx && idx < end) { /* elements within the range can be inserted either by ITERATOR * or INDEX */ how = how_t(rand() % (INDEX + 1)); size_change = false; } else if (idx == begin - 1) { /* elements right in front can be inserted either by INDEX * or PUSHPOP */ how = how_t(rand() % (INDEX + 1) + 1); size_change = true; } else { /* elements that are way out can be inserted only by INDEX */ how = INDEX; size_change = true; } map_state const init_state(map); switch(how) { case ITERATOR: { Map::iterator it(map.begin() + (idx - begin)); it = map.insert(it, val); ck_assert(it != map.end()); if (size_change) ck_assert(init_state != map_state(map)); else ck_assert(init_state == map_state(map)); } break; case INDEX: map.insert(idx, val); if (size_change) ck_assert(init_state != map_state(map)); else ck_assert(init_state == map_state(map)); break; case PUSHPOP: if (idx + 1 == begin) map.push_front(val); else { ck_assert(idx != end); map.push_back(val); } ck_assert(init_state != map_state(map)); } } ck_assert(!map.empty()); /* now erase all elements */ while (!map.empty()) { int const begin(map.index_begin()); int const end(map.index_end()); ck_assert(begin < end); ck_assert(map.index_back() < end); int const size(end - begin); int const idx((rand() % size) + begin); ck_assert(idx < map.index_end()); bool size_change; how_t how; 
if (begin < idx && idx < (end - 1)) { /* from inside we can erase either by ITERATOR or INDEX */ how = how_t(rand() % (INDEX + 1)); size_change = false; } else { /* from the edges we can erase anyhow */ how = how_t(rand() % (PUSHPOP + 1)); size_change = true; } if (!Map::not_set(map[idx])) { ck_assert_msg((map[idx]%SIZE) == idx, /* see filling loop above */ "Expected %d, got %d %% %d = %d", idx, map[idx], SIZE, map[idx]%SIZE); } map_state const init_state(map); switch (how) { case ITERATOR: map.erase(map.begin() + (idx - begin)); if (size_change) ck_assert(init_state != map_state(map)); else ck_assert(init_state == map_state(map)); break; case INDEX: map.erase(idx); if (size_change) ck_assert(init_state != map_state(map)); else ck_assert(init_state == map_state(map)); break; case PUSHPOP: if (idx == begin) map.pop_front(); else map.pop_back(); ck_assert(init_state != map_state(map)); } } } END_TEST Suite* gu_deqmap_suite () { Suite* const s(suite_create("gu::DeqMap")); TCase* t; t = tcase_create("ctor_clear"); tcase_add_test(t, ctor_clear); suite_add_tcase(s, t); t = tcase_create("push_pop"); tcase_add_test(t, push_pop); tcase_add_test(t, pop_holes); suite_add_tcase(s, t); t = tcase_create("at"); tcase_add_test(t, at); suite_add_tcase(s, t); t = tcase_create("iterators"); tcase_add_test(t, iterators_insert); tcase_add_test(t, iterators_erase); suite_add_tcase(s, t); t = tcase_create("null_insertions"); tcase_add_test(t, null_insertions_0); tcase_add_test(t, null_insertions_1); suite_add_tcase(s, t); t = tcase_create("random_access"); tcase_add_test(t, random_access); suite_add_tcase(s, t); t = tcase_create("find"); tcase_add_test(t, find); suite_add_tcase(s, t); t = tcase_create("random"); tcase_add_test(t, random_test); tcase_set_timeout(t, 120); suite_add_tcase(s, t); return s; } galera-4-26.4.25/galerautils/tests/gu_bswap_test.h000644 000164 177776 00000000264 15107057155 023213 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ 
#ifndef __gu_bswap_test__ #define __gu_bswap_test__ Suite *gu_bswap_suite(void); #endif /* __gu_bswap_test__ */ galera-4-26.4.25/galerautils/tests/gu_net_test.hpp000644 000164 177776 00000000307 15107057155 023223 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_net_test__ #define __gu_net_test__ #include extern Suite *gu_net_suite(void); #endif /* __gu_net_test__ */ galera-4-26.4.25/galerautils/tests/gu_hash_test.h000644 000164 177776 00000000313 15107057155 023015 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_hash_test__ #define __gu_hash_test__ #include extern Suite *gu_hash_suite(void); #endif /* __gu_hash_test__ */ galera-4-26.4.25/galerautils/tests/gu_bswap_test.c000644 000164 177776 00000002724 15107057155 023211 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #include #include #include "gu_bswap_test.h" #include "../src/gu_byteswap.h" START_TEST (gu_bswap_test) { // need volatile to prevent compile-time optimization volatile uint16_t s = 0x1234; volatile uint32_t i = 0x12345678; volatile uint64_t l = 0x1827364554637281LL; uint16_t sle, sbe; uint32_t ile, ibe; uint64_t lle, lbe; // first conversion sle = gu_le16(s); sbe = gu_be16(s); ile = gu_le32(i); ibe = gu_be32(i); lle = gu_le64(l); lbe = gu_be64(l); #if __BYTE_ORDER == __LITTLE_ENDIAN ck_assert(s == sle); ck_assert(i == ile); ck_assert(l == lle); ck_assert(s != sbe); ck_assert(i != ibe); ck_assert(l != lbe); #else ck_assert(s != sle); ck_assert(i != ile); ck_assert(l != lle); ck_assert(s == sbe); ck_assert(i == ibe); ck_assert(l == lbe); #endif /* __BYTE_ORDER */ // second conversion sle = gu_le16(sle); sbe = gu_be16(sbe); ile = gu_le32(ile); ibe = gu_be32(ibe); lle = gu_le64(lle); lbe = gu_be64(lbe); ck_assert(s == sle); ck_assert(i == ile); ck_assert(l == lle); ck_assert(s == sbe); ck_assert(i == ibe); ck_assert(l == lbe); } END_TEST Suite *gu_bswap_suite(void) { Suite *s = 
suite_create("Galera byteswap functions"); TCase *tc = tcase_create("gu_bswap"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_bswap_test); return s; } galera-4-26.4.25/galerautils/tests/gu_string_utils_test.hpp000644 000164 177776 00000000353 15107057155 025164 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_string_utils_test__ #define __gu_string_utils_test__ #include extern Suite* gu_string_utils_suite(void); #endif /* __gu_string_utils_test__ */ galera-4-26.4.25/galerautils/tests/gu_mmh3_test.c000644 000164 177776 00000022215 15107057155 022736 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy // $Id$ #include "gu_mmh3_test.h" #include "../src/gu_mmh3.h" #include "../src/gu_log.h" #include "../src/gu_hexdump.h" #include "../src/gu_byteswap.h" /* This is to verify all tails plus block + all tails. Max block is 16 bytes */ static const char test_input[] = "0123456789ABCDEF0123456789abcde"; typedef struct hash32 { uint8_t h[4]; } hash32_t; #define NUM_32_TESTS 8 /* 0 to 7 bytes */ static const hash32_t test_output32[NUM_32_TESTS] = { {{ 0x0b, 0x7c, 0x3e, 0xab }}, /* '' */ {{ 0xba, 0xeb, 0x75, 0x97 }}, /* '0' */ {{ 0x5d, 0x5c, 0x21, 0x60 }}, /* '01' */ {{ 0x4b, 0xff, 0x61, 0x41 }}, /* '012' */ {{ 0x35, 0x3b, 0x57, 0xca }}, /* '0123' */ {{ 0x09, 0xdd, 0x77, 0xf9 }}, /* '01234' */ {{ 0x1f, 0x3c, 0x29, 0x7b }}, /* '012345' */ {{ 0xe1, 0xbe, 0x2d, 0xce }} /* '0123456' */ }; typedef union hash128 { uint8_t h[16]; uint64_t x[2]; } hash128_t; #define NUM_128_TESTS 32 /* 0 to 31 bytes */ static const hash128_t test_output128[NUM_128_TESTS] = { {{ 0xa9,0xce,0x5a,0x56,0x0c,0x0b,0xf7,0xd6,0x63,0x4f,0x6f,0x81,0x0e,0x0b,0xf2,0x0a }}, {{ 0x72,0xa1,0x46,0xa3,0x73,0x03,0x49,0x85,0x30,0xb9,0x52,0xaa,0x3b,0x00,0xad,0x23 }}, {{ 0x4f,0x32,0xa2,0x15,0x91,0x00,0xea,0xaa,0x59,0x90,0x48,0x30,0xe5,0x86,0x50,0xee }}, {{ 0x55,0xfe,0x86,0x3b,0x9c,0x67,0xc6,0xee,0x5c,0x06,0x34,0xd0,0xe5,0x15,0xfb,0xdd }}, {{ 
0x3a,0x50,0x35,0xe5,0x72,0x75,0xa5,0x5e,0x46,0x3d,0x0e,0x23,0xbb,0x17,0x5a,0x66 }}, {{ 0x3b,0xff,0xb5,0x1a,0x93,0x0c,0x77,0x9a,0x40,0x5f,0x62,0x0c,0x40,0x15,0x0b,0x6e }}, {{ 0x7c,0xf8,0xf9,0xd2,0xfa,0x5a,0x8b,0x51,0x65,0x3c,0xa5,0x0e,0xa2,0xca,0x0a,0x87 }}, {{ 0x95,0x69,0x33,0x98,0xe4,0xb2,0x2a,0x21,0xd4,0x23,0x21,0x80,0xb1,0x00,0x46,0xbb }}, {{ 0x92,0xca,0xd3,0xbb,0x39,0x16,0x96,0xb5,0x3a,0x61,0x58,0x53,0xbb,0xf8,0xc4,0xb0 }}, {{ 0x36,0xf0,0xa3,0xc8,0xdc,0x5e,0x46,0x20,0x12,0xcf,0xad,0x3f,0xda,0xd5,0x95,0x7a }}, {{ 0xb9,0x71,0x76,0x54,0xd3,0x74,0x9b,0x31,0x93,0xb2,0xd9,0xbf,0xad,0x78,0x49,0x7e }}, {{ 0x39,0x75,0xc6,0x34,0x38,0x65,0x60,0x32,0xb1,0xa3,0x02,0xd2,0xba,0x47,0x0b,0xc3 }}, {{ 0x37,0xcd,0xe3,0x34,0x7d,0x2d,0xa4,0xdc,0xf3,0x51,0xd1,0x1e,0x46,0xb8,0x1a,0xd4 }}, {{ 0xa0,0xf6,0xff,0xc6,0xcd,0x50,0xdf,0xa2,0x59,0x36,0x8d,0xdf,0x09,0x57,0x14,0x7b }}, {{ 0xeb,0x58,0x42,0xca,0x56,0xb5,0x94,0x16,0x10,0x86,0x38,0x5b,0x2c,0x4a,0x13,0x84 }}, {{ 0x5d,0xee,0x3a,0x5b,0x45,0x5f,0x92,0x7d,0x42,0x91,0x8a,0x7b,0xb6,0xc7,0xde,0xd9 }}, {{ 0x63,0xff,0xe5,0x55,0x38,0x3d,0xd6,0x5d,0xa4,0xad,0xcb,0xf6,0x0a,0xc3,0xd9,0x12 }}, {{ 0x86,0x15,0xd3,0x5a,0x47,0x81,0x3f,0xea,0x6b,0xbc,0x3b,0x82,0xd0,0x49,0xda,0x5d }}, {{ 0xb7,0x41,0xc9,0xf5,0x94,0x3f,0x91,0xa5,0x56,0x68,0x9c,0x12,0xc7,0xa1,0xd9,0x45 }}, {{ 0xb7,0x7c,0x2f,0x60,0xe3,0x2b,0x6a,0xd6,0x5e,0x24,0x6c,0xaf,0x8c,0x83,0x99,0xc7 }}, {{ 0x62,0xdb,0xad,0xab,0xda,0x51,0x82,0x0b,0x04,0xe6,0x7a,0x88,0xaa,0xae,0xfd,0xce }}, {{ 0x70,0x89,0xd2,0x6a,0x35,0x80,0x19,0xa4,0x71,0x0e,0x5c,0x68,0x33,0xf5,0x0c,0x67 }}, {{ 0x05,0xb3,0x50,0x50,0xbe,0x8d,0xaa,0x6e,0x32,0x02,0x1b,0x5e,0xe6,0xb7,0x5f,0x72 }}, {{ 0x85,0x60,0x7c,0x7a,0xdf,0xaa,0x67,0xc6,0xed,0x3e,0x7e,0x13,0x84,0x2c,0xd4,0x28 }}, {{ 0x51,0x4a,0xe3,0x56,0xe0,0x5f,0x7d,0x42,0xfb,0x41,0xec,0xfe,0xff,0xa4,0x74,0x13 }}, {{ 0xb8,0xc0,0xc1,0x01,0xc2,0x74,0xbb,0x84,0xc8,0xca,0x16,0x9c,0x6b,0xf3,0x3e,0x4d }}, {{ 0xab,0xd0,0x4a,0xc5,0xa4,0xc8,0xce,0xf4,0xf2,0xf5,0x2f,0xdc,0x22,0x4f,0x20,0xda }}, 
{{ 0x36,0x25,0x28,0x74,0xf0,0x4c,0x36,0x38,0xd2,0x9a,0x64,0xf8,0x11,0xcf,0xaf,0x28 }}, {{ 0x8b,0x79,0x18,0x09,0x14,0x19,0x3c,0xa0,0x5b,0x62,0x4d,0x09,0x18,0xdd,0x6a,0x89 }}, {{ 0xc0,0xae,0x4f,0x67,0x45,0x01,0x00,0xb7,0x75,0xc5,0x1c,0x56,0xdf,0x55,0x7c,0x04 }}, {{ 0xcd,0x5a,0xda,0xea,0xbc,0xfb,0x8d,0xc7,0x8a,0xd3,0xc6,0x70,0x12,0x34,0x82,0x84 }}, {{ 0x69,0x53,0x0d,0xc3,0x4d,0xd4,0x33,0xe9,0x00,0x1b,0x27,0x06,0x27,0x7f,0x48,0xf7 }} }; typedef void (*hash_f_t) (const void* key, int len, uint32_t seed, void* out); /* Verification code from the original SMHasher test suite */ static void smhasher_verification (hash_f_t hash, size_t const hashbytes, uint32_t* const res) { ssize_t const n_tests = 256; uint8_t key[n_tests]; uint8_t hashes[hashbytes * n_tests]; uint8_t final[hashbytes]; /* Hash keys of the form {0}, {0,1}, {0,1,2}... up to N=255,using 256-N as * the seed */ ssize_t i; for(i = 0; i < n_tests; i++) { key[i] = (uint8_t)i; hash (key, i, n_tests - i, &hashes[i * hashbytes]); } /* Then hash the result array */ hash (hashes, hashbytes * n_tests, 0, final); memcpy (res, final, sizeof(*res)); } static hash32_t smhasher_checks[3] = { {{ 0xE3, 0x7E, 0xF5, 0xB0 }}, /* mmh3_32 */ {{ 0x2A, 0xE6, 0xEC, 0xB3 }}, /* mmh3_x86_128 */ {{ 0x69, 0xBA, 0x84, 0x63 }} /* mmh3_x64_128 */ }; /* returns true if check fails */ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { ssize_t str_size = size * 2.2 + 1; char c[str_size], r[str_size]; gu_hexdump (exp, size, c, sizeof(c), false); gu_hexdump (got, size, r, sizeof(r), false); gu_info ("expected MurmurHash3:\n%s\nfound:\n%s\n", c, r); return true; } return false; } START_TEST (gu_mmh32_test) { int i; uint32_t out; smhasher_verification (gu_mmh3_32, sizeof(out), &out); ck_assert_msg(!check(&smhasher_checks[0], &out, sizeof(out)), "gu_mmh3_32 failed."); for (i = 0; i < NUM_32_TESTS; i++) { uint32_t res = gu_mmh32 (test_input, i); res = gu_le32(res); 
ck_assert_msg(!check(&test_output32[i], &res, sizeof(res)), "gu_mmh32() failed at step %d",i); } } END_TEST #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ START_TEST (gu_mmh128_x86_test) { int i; uint32_t out32; smhasher_verification (gu_mmh3_x86_128, sizeof(hash128_t), &out32); ck_assert_msg(!check(&smhasher_checks[1], &out32, sizeof(out32)), "gu_mmh3_x86_128 failed."); for (i = 0; i < NUM_128_TESTS; i++) { hash128_t out; gu_mmh3_x86_128 (test_input, i, GU_MMH32_SEED, &out); check (&test_output128[i], &out, sizeof(out)); } } END_TEST #endif /* 0 */ START_TEST (gu_mmh128_x64_test) { int i; uint32_t out32; smhasher_verification (gu_mmh3_x64_128, sizeof(hash128_t), &out32); ck_assert_msg(!check(&smhasher_checks[2], &out32, sizeof(out32)), "gu_mmh3_x64_128 failed."); for (i = 0; i < NUM_128_TESTS; i++) { hash128_t out; gu_mmh128 (test_input, i, &out); ck_assert_msg(!check(&test_output128[i], &out, sizeof(out)), "gu_mmh128() failed at step %d", i); } } END_TEST /* Tests partial hashing functions */ START_TEST (gu_mmh128_partial) { hash128_t part; gu_mmh128_ctx_t ctx; gu_mmh128_init (&ctx); gu_mmh128_append (&ctx, test_input, 31); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[31], &part, sizeof(part)), "gu_mmh128_get() failed at one go"); gu_mmh128_init (&ctx); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[0], &part, sizeof(part)), "gu_mmh128_get() failed at init"); gu_mmh128_append (&ctx, test_input + 0, 0); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[0], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 0); gu_mmh128_append (&ctx, test_input + 0, 1); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[1], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 1); gu_mmh128_append (&ctx, test_input + 1, 2); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[3], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 3); gu_mmh128_get 
(&ctx, &part); ck_assert_msg(!check(&test_output128[3], &part, sizeof(part)), "gu_mmh128_get() failed at length %d again", 3); gu_mmh128_append (&ctx, test_input + 3, 20); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[23], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 23); gu_mmh128_append (&ctx, test_input + 23, 0); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[23], &part, sizeof(part)), "gu_mmh128_get() failed at length %d again", 23); gu_mmh128_append (&ctx, test_input + 23, 3); gu_mmh128_append (&ctx, test_input + 26, 3); gu_mmh128_append (&ctx, test_input + 29, 2); gu_mmh128_get (&ctx, &part); ck_assert_msg(!check(&test_output128[31], &part, sizeof(part)), "gu_mmh128_get() failed at length %d", 31); } END_TEST Suite *gu_mmh3_suite(void) { Suite *s = suite_create("MurmurHash3"); TCase *tc = tcase_create("gu_mmh3"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_mmh32_test); // tcase_add_test (tc, gu_mmh128_x86_test); tcase_add_test (tc, gu_mmh128_x64_test); tcase_add_test (tc, gu_mmh128_partial); return s; } galera-4-26.4.25/galerautils/tests/crc32c_bench.cpp000644 000164 177776 00000006735 15107057155 023127 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2020 Codership Oy */ #include "../src/gu_crc32c.h" #include #include #include #include #include #if __cplusplus >= 201103L #include #else #include static double time_diff(const struct timeval& l, const struct timeval& r) { double const left(double(l.tv_usec)*1.0e-06 + l.tv_sec); double const right(double(r.tv_usec)*1.0e-06 + r.tv_sec); return left - right; } #endif // C++11 static std::vector data(1<<21 /* 2M */); // Initialize data static class Setup { public: Setup() { for (size_t i(0); i < data.size(); ++i) { data[i] = static_cast(i); } } } setup; static uint32_t run_bench(size_t const len, size_t const reps) { static const size_t align_loop(sizeof(uint64_t)); if ((data.size() - len) < align_loop) throw std::out_of_range("Too many reps"); 
gu_crc32c_t state; gu_crc32c_init(&state); for (size_t r(0); r < reps; ++r) for (size_t i(0); i < align_loop; ++i) { // here we roll data window over the main data buffer to give equal // chance to different alignments gu_crc32c_append(&state, &data[i], len); } return gu_crc32c_get(state); } static void run_bench_with_impl(gu_crc32c_func_t impl, size_t len, size_t reps, const char* comment) { gu_crc32c_func = impl; // Run computation once to make complete possible lazy initializations. { gu_crc32c_t s; gu_crc32c_init(&s); gu_crc32c_append(&s, "1", 1); (void)gu_crc32c_get(s); } #if __cplusplus >= 201103L auto const start(std::chrono::steady_clock::now()); uint32_t const result(run_bench(len, reps)); auto const stop(std::chrono::steady_clock::now()); double const duration(std::chrono::duration(stop - start).count()); #else struct timeval start, stop; gettimeofday(&start, NULL); uint32_t const result(run_bench(len, reps)); gettimeofday(&stop, NULL); double const duration(time_diff(stop, start)); #endif // C++11 std::cout << comment << '\t' << len << '\t' << std::fixed << duration << '\t' << result << '\n'; } static gu_crc32c_func_t configured_impl; static void one_length(size_t const len, size_t const reps) { std::cout << "\nImpl: \tBytes:\tDuration:\tResult:\n"; run_bench_with_impl(gu_crc32c_sarwate, len, reps, "GU Sarwate "); run_bench_with_impl(gu_crc32c_slicing_by_4, len, reps, "GU Slicing4"); run_bench_with_impl(gu_crc32c_slicing_by_8, len, reps, "GU Slicing8"); #if defined(GU_CRC32C_X86) run_bench_with_impl(gu_crc32c_x86, len, reps, "GU x86_32 "); #if defined(GU_CRC32C_X86_64) run_bench_with_impl(gu_crc32c_x86_64, len, reps, "GU x86_64 "); #endif /* GU_CRC32C_X86_64 */ #endif /* GU_CRC32C_X86 */ #if defined(GU_CRC32C_ARM64) if (gu_crc32c_arm64 == configured_impl) run_bench_with_impl(gu_crc32c_arm64, len, reps, "GU arm64 "); #endif /* GU_CRC32C_X86 */ } int main() { gu_crc32c_configure(); // compute SW lookup tables configured_impl = gu_crc32c_func; 
one_length(11, 1<<22 /* 4M */); one_length(31, 1<<21 /* 2M */); one_length(64, 1<<20 /* 1M */); one_length(512, 1<<17 /* 128K */); one_length(1<<20, 64 /* 1M */); } galera-4-26.4.25/galerautils/tests/gu_crc32c_test.h000644 000164 177776 00000000333 15107057155 023153 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_crc32c_test_h__ #define __gu_crc32c_test_h__ #include Suite* gu_crc32c_suite(void); #endif /* __gu_crc32c_test_h__ */ galera-4-26.4.25/galerautils/tests/gu_digest_test.cpp000644 000164 177776 00000022141 15107057155 023707 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy /* * This unit test is mostly to check that Galera hash definitions didn't change: * correctness of hash algorithms definitions is checked in respective unit * tests. * * By convention checks are made against etalon byte arrays, so integers must be * converted to little-endian. * * $Id$ */ #include "../src/gu_digest.hpp" #include "gu_digest_test.hpp" #include "../src/gu_hexdump.hpp" #include "../src/gu_logger.hpp" #include "../src/gu_inttypes.hpp" /* checks equivalence of two buffers, returns true if check fails and logs * buffer contents. 
*/ static bool check (const void* const exp, const void* const got, ssize_t size) { if (memcmp (exp, got, size)) { log_info << "expected hash value:\n" << gu::Hexdump(exp, size) << "\nfound:\n" << gu::Hexdump(got, size) << "\n"; return true; } return false; } static const char test_msg[2048] = { 0, }; #define GU_HASH_TEST_LENGTH 43 /* some random prime */ static const uint8_t gu_hash128_check[16] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84,0x73,0x41,0x3F,0xA5,0xEB,0x27,0x40,0x2F }; static const uint8_t gu_hash64_check[8] = { 0xFA,0x2C,0x78,0x67,0x35,0x99,0xD9,0x84 }; static const uint8_t gu_hash32_check[4] = { 0xFA,0x2C,0x78,0x67 }; /* Tests partial hashing functions */ START_TEST (gu_hash_test) { gu::Hash hash_one; hash_one.append(test_msg, GU_HASH_TEST_LENGTH); uint8_t res128_one[16]; hash_one.gather(res128_one); ck_assert_msg(!check(gu_hash128_check, res128_one, sizeof(res128_one)), "gu::Hash::gather() failed in single mode."); gu::Hash::digest(test_msg, GU_HASH_TEST_LENGTH, res128_one); ck_assert_msg(!check(gu_hash128_check, res128_one, sizeof(res128_one)), "gu::Hash::digest() failed."); gu::Hash hash_multi; int off = 0; hash_multi.append(test_msg, 16); off += 16; hash_multi.append(test_msg + off, 15); off += 15; hash_multi.append(test_msg + off, 7); off += 7; hash_multi.append(test_msg + off, 5); off += 5; ck_assert(off == GU_HASH_TEST_LENGTH); uint8_t res128_multi[16]; hash_multi.gather(res128_multi); ck_assert_msg(!check(gu_hash128_check, res128_multi, sizeof(res128_multi)), "gu::Hash::gather() failed in multi mode."); uint64_t res64; hash_multi.gather(&res64); uint64_t const res(gu_hash64(test_msg, GU_HASH_TEST_LENGTH)); ck_assert_msg(res == res64, "got 0x%0" PRIx64 ", expected 0x%" PRIx64, res64, res); res64 = gu_le64(res64); ck_assert_msg(!check(gu_hash64_check, &res64, sizeof(res64)), "gu::Hash::gather() failed."); uint32_t res32; hash_one(res32); ck_assert(gu_hash32(test_msg, GU_HASH_TEST_LENGTH) == res32); res32 = gu_le32(res32); 
ck_assert_msg(!check(gu_hash32_check, &res32, sizeof(res32)), "gu::Hash::gather() failed."); } END_TEST static const uint8_t fast_hash128_check0 [16] = { 0xA9,0xCE,0x5A,0x56,0x0C,0x0B,0xF7,0xD6,0x63,0x4F,0x6F,0x81,0x0E,0x0B,0xF2,0x0A }; static const uint8_t fast_hash128_check511 [16] = { 0xC6,0x7F,0x4C,0xE7,0x6F,0xE0,0xDA,0x14,0xCC,0x9F,0x21,0x76,0xAF,0xB5,0x12,0x1A }; static const uint8_t fast_hash128_check512 [16] = { 0x38,0x8D,0x2B,0x90,0xC8,0x7F,0x11,0x53,0x3F,0xB4,0x32,0xC1,0xD7,0x2B,0x04,0x39 }; static const uint8_t fast_hash128_check2011[16] = { 0xB7,0xCE,0x75,0xC7,0xB4,0x31,0xBC,0xC8,0x95,0xB3,0x41,0xB8,0x5B,0x8E,0x77,0xF9 }; static const uint8_t fast_hash64_check0 [8] = { 0x6C, 0x55, 0xB8, 0xA1, 0x02, 0xC6, 0x21, 0xCA }; static const uint8_t fast_hash64_check15 [8] = { 0x28, 0x49, 0xE8, 0x34, 0x7A, 0xAB, 0x49, 0x34 }; static const uint8_t fast_hash64_check16 [8] = { 0x44, 0x40, 0x2C, 0x82, 0xD3, 0x8D, 0xAA, 0xFE }; static const uint8_t fast_hash64_check511 [8] = { 0xC6, 0x7F, 0x4C, 0xE7, 0x6F, 0xE0, 0xDA, 0x14 }; static const uint8_t fast_hash64_check512 [8] = { 0x38, 0x8D, 0x2B, 0x90, 0xC8, 0x7F, 0x11, 0x53 }; static const uint8_t fast_hash64_check2011[8] = { 0xB7, 0xCE, 0x75, 0xC7, 0xB4, 0x31, 0xBC, 0xC8 }; static const uint8_t fast_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t fast_hash32_check31 [4] = { 0x1E, 0xFF, 0x48, 0x38 }; static const uint8_t fast_hash32_check32 [4] = { 0x63, 0xC2, 0x53, 0x0D }; static const uint8_t fast_hash32_check511 [4] = { 0xC6, 0x7F, 0x4C, 0xE7 }; static const uint8_t fast_hash32_check512 [4] = { 0x38, 0x8D, 0x2B, 0x90 }; static const uint8_t fast_hash32_check2011[4] = { 0xB7, 0xCE, 0x75, 0xC7 }; /* Tests fast hash functions */ START_TEST (gu_fast_hash_test) { uint8_t res128[16]; gu::FastHash::digest (test_msg, 0, res128); ck_assert(!check(fast_hash128_check0, res128, sizeof(res128))); gu::FastHash::digest (test_msg, 511, res128); ck_assert(!check(fast_hash128_check511, res128, sizeof(res128))); 
gu::FastHash::digest (test_msg, 512, res128); ck_assert(!check(fast_hash128_check512, res128, sizeof(res128))); gu::FastHash::digest (test_msg, 2011, res128); ck_assert(!check(fast_hash128_check2011, res128, sizeof(res128))); uint64_t res64; res64 = gu::FastHash::digest(test_msg, 0); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check0, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,15); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check15, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,16); res64 = gu_le64(res64); ck_assert(!check(fast_hash64_check16, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,511); res64 =gu_le64(res64); ck_assert(!check(fast_hash64_check511, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,512); res64 =gu_le64(res64); ck_assert(!check(fast_hash64_check512, &res64, sizeof(res64))); res64 = gu::FastHash::digest(test_msg,2011);res64 =gu_le64(res64); ck_assert(!check(fast_hash64_check2011, &res64, sizeof(res64))); uint32_t res32; res32 = gu::FastHash::digest(test_msg, 0); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check0, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,31); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check31, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,32); res32 = gu_le32(res32); ck_assert(!check(fast_hash32_check32, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,511); res32 =gu_le32(res32); ck_assert(!check(fast_hash32_check511, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,512); res32 =gu_le32(res32); ck_assert(!check(fast_hash32_check512, &res32, sizeof(res32))); res32 = gu::FastHash::digest(test_msg,2011); res32=gu_le32(res32); ck_assert(!check(fast_hash32_check2011, &res32, sizeof(res32))); } END_TEST #if SKIP_TABLE_FUNCTIONS /* Tests table hash functions: * - for 64-bit platforms table hash should be identical to fast 64-bit hash, * - for 32-bit platforms table 
hash is different. */ #if GU_WORDSIZE == 64 START_TEST (gu_table_hash_test) { size_t res; ck_assert(sizeof(res) > 8); res = gu_table_hash (test_msg, 0); res = gu_le64(res); ck_assert(!check(fast_hash64_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 15); res = gu_le64(res); ck_assert(!check(fast_hash64_check15, &res, sizeof(res))); res = gu_table_hash (test_msg, 16); res = gu_le64(res); ck_assert(!check(fast_hash64_check16, &res, sizeof(res))); res = gu_table_hash (test_msg, 511); res = gu_le64(res); ck_assert(!check(fast_hash64_check511, &res, sizeof(res))); res = gu_table_hash (test_msg, 512); res = gu_le64(res); ck_assert(!check(fast_hash64_check512, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le64(res); ck_assert(!check(fast_hash64_check2011, &res, sizeof(res))); } END_TEST #elif GU_WORDSIZE == 32 static const uint8_t table_hash32_check0 [4] = { 0x0B, 0x7C, 0x3E, 0xAB }; static const uint8_t table_hash32_check32 [4] = { 0x65, 0x16, 0x17, 0x42 }; static const uint8_t table_hash32_check2011[4] = { 0xF9, 0xBC, 0xEF, 0x7A }; START_TEST (gu_table_hash_test) { size_t res; ck_assert(sizeof(res) <= 4); res = gu_table_hash (test_msg, 0); res = gu_le32(res); ck_assert(!check(table_hash32_check0, &res, sizeof(res))); res = gu_table_hash (test_msg, 32); res = gu_le32(res); ck_assert(!check(table_hash32_check32, &res, sizeof(res))); res = gu_table_hash (test_msg, 2011); res = gu_le32(res); ck_assert(!check(table_hash32_check2011, &res, sizeof(res))); } END_TEST #else /* GU_WORDSIZE == 32 */ # error "Unsupported word size" #endif #endif // SKIP_TABLE_FUNCTIONS Suite *gu_digest_suite(void) { Suite *s = suite_create("gu::Hash"); TCase *tc = tcase_create("gu_hash"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_hash_test); tcase_add_test (tc, gu_fast_hash_test); // tcase_add_test (tc, gu_table_hash_test); return s; } galera-4-26.4.25/galerautils/tests/gu_stats_test.cpp000644 000164 177776 00000002457 15107057155 023576 
0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014-2020 Codership Oy */ #include "../src/gu_stats.hpp" #include "gu_stats_test.hpp" #include #include using namespace gu; static inline bool double_equal(double a, double b) { return (std::fabs(a - b) <= std::fabs(a + b) * std::numeric_limits::epsilon()); } START_TEST(test_stats) { Stats st; st.insert(10.0); st.insert(20.0); st.insert(30.0); ck_assert(double_equal(st.mean(), 20.0)); ck_assert_msg(double_equal(st.variance() * 3, 200.0), "%e != 0", st.variance()*3-200.0); ck_assert(double_equal(st.min(), 10.0)); ck_assert(double_equal(st.max(), 30.0)); st.clear(); st.insert(10.0); ck_assert(double_equal(st.mean(), 10.0)); ck_assert(double_equal(st.variance(), 0.0)); ck_assert(double_equal(st.min(), 10.0)); ck_assert(double_equal(st.max(), 10.0)); st.clear(); ck_assert(double_equal(st.mean(), 0.0)); ck_assert(double_equal(st.variance(), 0.0)); ck_assert(double_equal(st.min(), 0.0)); ck_assert(double_equal(st.max(), 0.0)); } END_TEST Suite* gu_stats_suite() { TCase* t = tcase_create ("test_stats"); tcase_add_test (t, test_stats); Suite* s = suite_create ("gu::Stats"); suite_add_tcase (s, t); return s; } galera-4-26.4.25/galerautils/tests/gu_to_test.c000644 000164 177776 00000022061 15107057155 022513 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ #include #include #include // printf() #include // strerror() #include // strtol(), exit(), EXIT_SUCCESS, EXIT_FAILURE #include // errno #include // gettimeofday() #include // usleep() #include #include struct thread_ctx { gu_thread_t thread; long thread_id; long stat_grabs; // how many times gcs_to_grab() was successful long stat_cancels;// how many times gcs_to_cancel() was called long stat_fails; // how many times gcs_to_grab() failed long stat_self; // how many times gcs_self_cancel() was called }; /* returns a semirandom number (hash) from seqno */ static inline ulong my_rnd (uint64_t x) { x = 2654435761U * x; // 
http://www.concentric.net/~Ttwang/tech/inthash.htm return (ulong)(x ^ (x >> 32)); // combine upper and lower halfs for better // randomness } /* whether to cancel self */ static inline ulong self_cancel (ulong rnd) { return !(rnd & 0xf); // will return TRUE once in 16 } /* how many other seqnos to cancel */ static inline ulong cancel (ulong rnd) { #if 0 // this causes probablity of conflict 88% // and average conflicts per seqno 3.5. Reveals a lot of corner cases return (rnd & 0x70) >> 4; // returns 0..7 #else // this is more realistic. // probability of conflict 25%, conflict rate 0.375 ulong ret = (rnd & 0x70) >> 4; // returns 0,0,0,0,0,0,1,2 if (gu_likely(ret < 5)) return 0; else return (ret - 5); #endif } /* offset of seqnos to cancel */ static inline ulong cancel_offset (ulong rnd) { return ((rnd & 0x700) >> 8) + 1; // returns 1 - 8 } static gu_to_t* to = NULL; static ulong thread_max = 16; // default number of threads static gu_seqno_t seqno_max = 1<<20; // default number of seqnos to check /* mutex to synchronize threads start */ static gu_mutex_t start = GU_MUTEX_INITIALIZER; static const unsigned int t = 10; // optimal sleep time static const struct timespec tsleep = { 0, 10000000 }; // 10 ms void* run_thread(void* ctx) { struct thread_ctx* thd = ctx; gu_seqno_t seqno = thd->thread_id; // each thread starts with own offset // to guarantee uniqueness of seqnos // without having to lock mutex gu_mutex_lock (&start); // wait for start signal gu_mutex_unlock (&start); while (seqno < seqno_max) { long ret; ulong rnd = my_rnd(seqno); if (gu_unlikely(self_cancel(rnd))) { // printf("Self-cancelling %8llu\n", (unsigned long long)seqno); while ((ret = gu_to_self_cancel(to, seqno)) == -EAGAIN) usleep (t); if (gu_unlikely(ret)) { fprintf (stderr, "gu_to_self_cancel(%llu) returned %ld (%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } else { // printf ("Self-cancel success (%llu)\n", (unsigned long long)seqno); thd->stat_self++; } } else { 
// printf("Grabbing %8llu\n", (unsigned long long)seqno); while ((ret = gu_to_grab (to, seqno)) == -EAGAIN) nanosleep (&tsleep, NULL); if (gu_unlikely(ret)) { if (gu_likely(-ECANCELED == ret)) { // printf ("canceled (%llu)\n", (unsigned long long)seqno); thd->stat_fails++; } else { fprintf (stderr, "gu_to_grab(%llu) returned %ld (%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } } else { long cancels = cancel(rnd); // printf ("success (%llu), cancels = %ld\n", (unsigned long long)seqno, cancels); if (gu_likely(cancels)) { long offset = cancel_offset (rnd); gu_seqno_t cancel_seqno = seqno + offset; while (cancels-- && (cancel_seqno < seqno_max)) { ret = gu_to_cancel(to, cancel_seqno); if (gu_unlikely(ret)) { fprintf (stderr, "gu_to_cancel(%llu) by %llu " "failed: %s\n", (unsigned long long)cancel_seqno, (unsigned long long)seqno, strerror (-ret)); exit (EXIT_FAILURE); } else { // printf ("%llu canceled %llu\n", // seqno, cancel_seqno); cancel_seqno += offset; thd->stat_cancels++; } } } thd->stat_grabs++; ret = gu_to_release(to, seqno); if (gu_unlikely(ret)) { fprintf (stderr, "gu_to_release(%llu) failed: %ld(%s)\n", (unsigned long long)seqno, ret, strerror(-ret)); exit (EXIT_FAILURE); } } } seqno += thread_max; // this together with unique starting point // guarantees that seqnos are unique } // printf ("Thread %ld exiting. 
Last seqno = %llu\n", // thd->thread_id, (unsigned long long)(seqno - thread_max)); return NULL; } int main (int argc, char* argv[]) { // minimum to length required by internal logic ulong to_len = cancel(0xffffffff) * cancel_offset(0xffffffff); errno = 0; if (argc > 1) seqno_max = (1 << atol(argv[0])); if (argc > 2) thread_max = (1 << atol(argv[1])); if (errno) { fprintf (stderr, "Usage: %s [seqno [threads]]\nBoth seqno and threads" "are exponents of 2^n.\n", argv[0]); exit(errno); } printf ("Starting with %lu threads and %llu maximum seqno.\n", thread_max, (unsigned long long)seqno_max); /* starting with 0, enough space for all threads and cancels */ // 4 is a magic number to get it working without excessive sleep on amd64 to_len = to_len > thread_max ? to_len : thread_max; to_len *= 4; to = gu_to_create (to_len, 0); if (to != NULL) { printf ("Created TO monitor of length %lu\n", to_len); } else { exit (-ENOMEM); } /* main block */ { long i, ret; clock_t start_clock, stop_clock; double time_spent; struct thread_ctx thread[thread_max]; gu_mutex_lock (&start); { /* initialize threads */ for (i = 0; (ulong)i < thread_max; i++) { thread[i].thread_id = i; thread[i].stat_grabs = 0; thread[i].stat_cancels = 0; thread[i].stat_fails = 0; thread[i].stat_self = 0; ret = pthread_create(&(thread[i].thread), NULL, run_thread, &thread[i]); if (ret) { fprintf (stderr, "Failed to create thread %ld: %s", i, strerror(ret)); exit (EXIT_FAILURE); } } start_clock = clock(); } gu_mutex_unlock (&start); // release threads /* wait for threads to complete and accumulate statistics */ gu_thread_join (thread[0].thread, NULL); for (i = 1; (ulong)i < thread_max; i++) { pthread_join (thread[i].thread, NULL); thread[0].stat_grabs += thread[i].stat_grabs; thread[0].stat_cancels += thread[i].stat_cancels; thread[0].stat_fails += thread[i].stat_fails; thread[0].stat_self += thread[i].stat_self; } stop_clock = clock(); time_spent = gu_clock_diff (stop_clock,start_clock); /* print statistics */ 
printf ("%llu seqnos in %.3f seconds (%.3f seqno/sec)\n", (unsigned long long)seqno_max, time_spent, ((double) seqno_max)/time_spent); printf ("Overhead at 10000 actions/second: %.2f%%\n", (time_spent * 10000 * 100/* for % */)/seqno_max); printf ("Grabbed: %9lu\n" "Failed: %9lu\n" "Self-cancelled: %9lu\n" "Canceled: %9lu (can exceed total number of seqnos)\n", thread[0].stat_grabs, thread[0].stat_fails, thread[0].stat_self, thread[0].stat_cancels ); if (seqno_max != (thread[0].stat_grabs+thread[0].stat_fails+thread[0].stat_self)) { fprintf (stderr, "Error: total number of grabbed, failed and " "self-cancelled waiters does not match total seqnos.\n"); exit (EXIT_FAILURE); } } return 0; } galera-4-26.4.25/galerautils/tests/gu_tests++.hpp000644 000164 177776 00000002521 15107057155 022666 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2014 Codership Oy // $Id$ /*! * @file: package specific part of the main test file. */ #ifndef __gu_testspp_hpp__ #define __gu_testspp_hpp__ #define LOG_FILE "gu_tests++.log" #include "gu_atomic_test.hpp" #include "gu_vector_test.hpp" #include "gu_string_test.hpp" #include "gu_vlq_test.hpp" #include "gu_digest_test.hpp" #include "gu_mem_pool_test.hpp" #include "gu_alloc_test.hpp" #include "gu_rset_test.hpp" #include "gu_string_utils_test.hpp" #include "gu_uri_test.hpp" #include "gu_gtid_test.hpp" #include "gu_config_test.hpp" #include "gu_net_test.hpp" #include "gu_datetime_test.hpp" #include "gu_histogram_test.hpp" #include "gu_stats_test.hpp" #include "gu_thread_test.hpp" #include "gu_asio_test.hpp" #include "gu_deqmap_test.hpp" #include "gu_utils_test++.hpp" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gu_atomic_suite, gu_vector_suite, gu_string_suite, gu_vlq_suite, gu_digest_suite, gu_mem_pool_suite, gu_alloc_suite, gu_rset_suite, gu_string_utils_suite, gu_uri_suite, gu_gtid_suite, gu_config_suite, gu_net_suite, gu_datetime_suite, gu_histogram_suite, gu_stats_suite, gu_thread_suite, 
gu_asio_suite, gu_deqmap_suite, gu_utils_cpp_suite, 0 }; #endif /* __gu_testspp_hpp__ */ galera-4-26.4.25/galerautils/tests/gu_asio_test.cpp000644 000164 177776 00000264551 15107057155 023400 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2019-2023 Codership Oy */ #include "gu_asio.hpp" #define GU_ASIO_IMPL #include "gu_asio_stream_engine.hpp" #include "gu_asio_test.hpp" #include "gu_buffer.hpp" #include "gu_compiler.hpp" #include #include // recv(), send(), etc. // // Helper classes // class MockStreamEngine : public gu::AsioStreamEngine { public: MockStreamEngine(); ~MockStreamEngine(); std::string scheme() const GALERA_OVERRIDE { return "mock"; }; void assign_fd(int fd) GALERA_OVERRIDE { fd_ = fd; } enum op_status client_handshake() GALERA_OVERRIDE { ++count_client_handshake_called; last_error_ = next_error; return next_result; } enum op_status server_handshake() GALERA_OVERRIDE { ++count_server_handshake_called; last_error_ = next_error; log_info << "MockStreamEngine::server_handshake: called " << count_server_handshake_called << " next_result: " << next_result; return next_result; } op_result read(void* buf, size_t max_count) GALERA_OVERRIDE { ++count_read_called; ssize_t read_result(::recv(fd_, buf, max_count, 0)); return map_return_value(read_result, want_read); } op_result write(const void* buf, size_t count) GALERA_OVERRIDE { ++count_write_called; ssize_t write_result(::send(fd_, buf, count, MSG_NOSIGNAL)); return map_return_value(write_result, want_write); } void shutdown() GALERA_OVERRIDE { } gu::AsioErrorCode last_error() const GALERA_OVERRIDE { return last_error_; } op_result map_return_value(ssize_t result, enum op_status return_on_block) { if (next_result != success) { last_error_ = next_error; return {next_result, size_t(result)}; } if (result > 0) { return {success, size_t(result)}; } else if (result == 0) { return {eof, size_t(result)}; } else if (errno == EAGAIN || errno == EWOULDBLOCK) { last_error_ = errno; return {return_on_block, 
size_t(result)}; } else { last_error_ = next_error; return {error, size_t(result)}; } } enum op_status next_result; int next_error; size_t count_client_handshake_called; size_t count_server_handshake_called; size_t count_read_called; size_t count_write_called; private: int fd_; int last_error_; }; MockStreamEngine::MockStreamEngine() : next_result(success) , next_error() , count_client_handshake_called() , count_server_handshake_called() , count_read_called() , count_write_called() , fd_() , last_error_() { log_info << "MockStreamEngine"; } MockStreamEngine::~MockStreamEngine() { log_info << "~MockStreamEngine"; } class MockSocketHandler : public gu::AsioSocketHandler { public: MockSocketHandler(const std::string& context = "") : gu::AsioSocketHandler() , write_buffer_() , read_buffer_() , invocations_() , connect_handler_called_() , expect_read_() , bytes_read_() , bytes_written_() , last_error_code_() , context_(context) { log_info << "MockSocketHandler(" << context_ << ")"; } ~MockSocketHandler() { log_info << "~MockSocketHandler(" << context_ << ")"; } virtual void connect_handler(gu::AsioSocket& socket, const gu::AsioErrorCode& ec) GALERA_OVERRIDE { log_info << "MockSocketHandler(" << context_ << ") connected: " << &socket << " error_code: " << ec; invocations_.push_back("connect"); connect_handler_called_ = true; last_error_code_ = ec; } virtual void write_handler(gu::AsioSocket&, const gu::AsioErrorCode& ec, size_t bytes_transferred) GALERA_OVERRIDE { std::ostringstream oss; oss << "write:" << bytes_transferred; invocations_.push_back(oss.str()); bytes_written_ += bytes_transferred; last_error_code_ = ec; } virtual size_t read_completion_condition(gu::AsioSocket&, const gu::AsioErrorCode& ec, size_t bytes_transferred) GALERA_OVERRIDE { std::ostringstream oss; oss << "read_completion:" << bytes_transferred; invocations_.push_back(oss.str()); last_error_code_ = ec; return (expect_read_ - std::min(bytes_transferred + bytes_read_, expect_read_)); } virtual void 
read_handler(gu::AsioSocket&, const gu::AsioErrorCode& ec, size_t bytes_transferred) GALERA_OVERRIDE { std::ostringstream oss; oss << "read:" << bytes_transferred; invocations_.push_back(oss.str()); bytes_read_ += bytes_transferred; last_error_code_ = ec; oss.str(""); oss.clear(); std::copy(invocations_.begin(), invocations_.end(), std::ostream_iterator(oss, "\n")); log_info << "Invocations so far:\n" << oss.str(); } bool connect_handler_called() const { return connect_handler_called_; } void expect_read(size_t bytes) { expect_read_ = bytes; } size_t bytes_read() const { return bytes_read_; } void consume(size_t count) { assert(count <= bytes_read_); bytes_read_ -= count; } size_t bytes_written() const { return bytes_written_; } const gu::AsioErrorCode& last_error_code() const { return last_error_code_; } private: std::array write_buffer_; std::string read_buffer_; std::vector invocations_; bool connect_handler_called_; size_t expect_read_; size_t bytes_read_; size_t bytes_written_; gu::AsioErrorCode last_error_code_; std::string context_; }; #include "gu_disable_non_virtual_dtor.hpp" class MockAcceptorHandler : public gu::AsioAcceptorHandler , public std::enable_shared_from_this { public: MockAcceptorHandler() : cur_stream_engine() , next_stream_engine() , next_socket_handler(std::make_shared("server")) , accepted_socket_() , accepted_handler_() { } ~MockAcceptorHandler() { } virtual void accept_handler(gu::AsioAcceptor& acceptor, const std::shared_ptr& socket, const gu::AsioErrorCode& ec) GALERA_OVERRIDE { log_info << "accepted " << socket.get() << " error code: " << ec; if (not ec) { accepted_socket_ = socket; accepted_handler_ = next_socket_handler; } if (next_stream_engine) { cur_stream_engine = next_stream_engine; next_stream_engine = std::make_shared(); } next_socket_handler = std::make_shared(); acceptor.async_accept(shared_from_this(), next_socket_handler, next_stream_engine); } std::shared_ptr accepted_socket() const { return accepted_socket_; } 
std::shared_ptr accepted_handler() const { return accepted_handler_; } void reset() { accepted_socket_.reset(); accepted_handler_.reset(); } /* Stream engine which was assigned during previous call to * accept_handler(). */ std::shared_ptr cur_stream_engine; /* Stream engine which will be assigned when the * accept_handler() is called next time. */ std::shared_ptr next_stream_engine; /* Socket handler for the next accepted connection. */ std::shared_ptr next_socket_handler; private: std::shared_ptr accepted_socket_; std::shared_ptr accepted_handler_; }; #include "gu_enable_non_virtual_dtor.hpp" // // Address // START_TEST(test_make_address_v4) { auto a(gu::make_address("10.2.14.1")); ck_assert(a.is_v4()); ck_assert(a.is_v6() == false); } END_TEST // Verify that link local address without scope ID is parsed // properly. START_TEST(test_make_address_v6_link_local) { auto a(gu::make_address("fe80::fc87:f2ff:fe85:6ba6")); ck_assert(a.is_v4() == false); ck_assert(a.is_v6()); ck_assert(a.to_v6().scope_id() == 0); ck_assert(a.to_v6().is_link_local()); a = gu::make_address("[fe80::fc87:f2ff:fe85:6ba6]"); ck_assert(a.is_v4() == false); ck_assert(a.is_v6()); ck_assert(a.to_v6().scope_id() == 0); ck_assert(a.to_v6().is_link_local()); } END_TEST // Verify that link local address with scope ID is parsed // properly. 
START_TEST(test_make_address_v6_link_local_with_scope_id) { auto a(gu::make_address("fe80::fc87:f2ff:fe85:6ba6%1")); ck_assert(a.is_v4() == false); ck_assert(a.is_v6()); ck_assert(a.to_v6().scope_id() == 1); a = gu::make_address("[fe80::fc87:f2ff:fe85:6ba6%1]"); ck_assert(a.is_v4() == false); ck_assert(a.is_v6()); ck_assert(a.to_v6().scope_id() == 1); } END_TEST START_TEST(test_const_buffer) { const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); ck_assert(cbs[0].size() == 3); ck_assert(cbs[1].size() == 4); } END_TEST START_TEST(test_error_code_success) { gu::AsioErrorCode ec(gu::AsioErrorCode(0)); ck_assert(not ec); } END_TEST START_TEST(test_error_code_error) { gu::AsioErrorCode ec(gu::AsioErrorCode(1)); ck_assert(ec); } END_TEST START_TEST(test_io_service) { gu::AsioIoService io_service; } END_TEST START_TEST(test_tcp_socket) { gu::AsioIoService io_service; auto socket(io_service.make_socket(gu::URI("tcp://127.0.0.1:0"))); } END_TEST template void test_socket_receive_buffer_size_unopened_common(Socket& socket) { try { (void)socket.get_receive_buffer_size(); ck_abort_msg("Exception not thrown when calling get receive buffer " "for closed socket"); } catch (const gu::Exception&) { } try { socket.set_receive_buffer_size(1 << 16); ck_abort_msg("Exception not thrown when calling get receive buffer " "for closed socket"); } catch (const gu::Exception&) { } } START_TEST(test_tcp_socket_receive_buffer_size_unopened) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_receive_buffer_size_unopened_common(*socket); } END_TEST template void test_socket_receive_buffer_size_common(Socket& socket, const gu::URI& uri) { socket.open(uri); size_t default_size(socket.get_receive_buffer_size()); socket.set_receive_buffer_size(default_size/2); ck_assert(socket.get_receive_buffer_size() == default_size/2); 
} START_TEST(test_tcp_socket_receive_buffer_size) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_receive_buffer_size_common(*socket, uri); } END_TEST template void test_socket_send_buffer_size_unopened_common(Socket& socket) { try { (void)socket.get_send_buffer_size(); ck_abort_msg("Exception not thrown when calling get send buffer " "for closed socket"); } catch (const gu::Exception&) { } try { socket.set_send_buffer_size(1 << 16); ck_abort_msg("Exception not thrown when calling get send buffer " "for closed socket"); } catch (const gu::Exception&) { } } START_TEST(test_tcp_socket_send_buffer_size_unopened) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_send_buffer_size_unopened_common(*socket); } END_TEST template void test_socket_send_buffer_size_common(Socket& socket, const gu::URI& uri) { socket.open(uri); size_t default_size(socket.get_send_buffer_size()); socket.set_send_buffer_size(default_size/2); ck_assert(socket.get_send_buffer_size() == default_size/2); } START_TEST(test_tcp_socket_send_buffer_size) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_send_buffer_size_common(*socket, uri); } END_TEST START_TEST(test_tcp_read_unopened) { gu::AsioIoService io_service; auto socket(io_service.make_socket(gu::URI("tcp://"))); auto socket_handler(std::make_shared()); try { char b; gu::AsioMutableBuffer mb(&b, 1); socket->async_read(mb, socket_handler); ck_abort_msg("Exception not thrown"); } catch (const gu::Exception&) { } } END_TEST START_TEST(test_tcp_write_unopened) { gu::AsioIoService io_service; auto socket(io_service.make_socket(gu::URI("tcp://"))); auto socket_handler(std::make_shared()); try { std::array cbs; cbs[0] = gu::AsioConstBuffer("1", 1); cbs[1] = gu::AsioConstBuffer(); socket->async_write(cbs, socket_handler); 
ck_abort_msg("Exception not thrown"); } catch (const gu::Exception&) { } } END_TEST START_TEST(test_tcp_acceptor) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); } END_TEST START_TEST(test_tcp_acceptor_listen) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); auto listen_addr(acceptor->listen_addr()); ck_assert(listen_addr.find("tcp://127.0.0.1") != std::string::npos); } END_TEST START_TEST(test_tcp_acceptor_listen_already_bound) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); auto listen_addr(acceptor->listen_addr()); ck_assert(listen_addr.find("tcp://127.0.0.1") != std::string::npos); auto acceptor2(io_service.make_acceptor(acceptor->listen_addr())); try { acceptor2->listen(acceptor->listen_addr()); ck_abort_msg("Exception not thrown for address already in use"); } catch (const gu::Exception& e) { ck_assert(e.get_errno() == EADDRINUSE); } } END_TEST START_TEST(test_tcp_acceptor_receive_buffer_size_unopened) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); try { (void)acceptor->get_receive_buffer_size(); ck_abort_msg("Exception not thrown when calling get receive buffer " "for closed acceptor"); } catch (const gu::Exception&) { } try { acceptor->set_receive_buffer_size(1 << 16); ck_abort_msg("Exception not thrown when calling get receive buffer " "for closed acceptor"); } catch (const gu::Exception&) { } } END_TEST START_TEST(test_tcp_acceptor_receive_buffer_size) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); acceptor->open(uri); size_t default_size(acceptor->get_receive_buffer_size()); 
acceptor->set_receive_buffer_size(default_size/2); ck_assert(acceptor->get_receive_buffer_size() == default_size/2); } END_TEST START_TEST(test_tcp_acceptor_send_buffer_size_unopened) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); try { (void)acceptor->get_send_buffer_size(); ck_abort_msg("Exception not thrown when calling get send buffer " "for closed acceptor"); } catch (const gu::Exception&) { } try { acceptor->set_send_buffer_size(1 << 16); ck_abort_msg("Exception not thrown when calling get send buffer " "for closed acceptor"); } catch (const gu::Exception&) { } } END_TEST START_TEST(test_tcp_acceptor_send_buffer_size) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); acceptor->open(uri); size_t default_size(acceptor->get_send_buffer_size()); acceptor->set_send_buffer_size(default_size/2); ck_assert(acceptor->get_send_buffer_size() == default_size/2); } END_TEST void wait_handshake_ready(gu::AsioIoService& io_service, MockAcceptorHandler& acceptor_handler, MockSocketHandler& socket_handler) { while (not(acceptor_handler.accepted_socket() && acceptor_handler.accepted_handler()->connect_handler_called() && socket_handler.connect_handler_called())) { io_service.run_one(); } } template void test_connect_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); wait_handshake_ready(io_service, acceptor_handler, *handler); auto accepted_socket(acceptor_handler.accepted_socket()); ck_assert_msg(acceptor.listen_addr() == accepted_socket->local_addr(), "%s != %s", acceptor.listen_addr().c_str(), accepted_socket->local_addr().c_str()); ck_assert(socket->local_addr() == accepted_socket->remote_addr()); ck_assert(socket->remote_addr() == 
accepted_socket->local_addr()); } START_TEST(test_tcp_connect) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_tcp_connect_twice) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); acceptor_handler->reset(); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); } END_TEST template void test_async_read_write_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); wait_handshake_ready(io_service, acceptor_handler, *handler); const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); socket->async_write(cbs, handler); while (handler->bytes_written() != strlen(hdr) + strlen(data)) { io_service.run_one(); } auto accepted_socket(acceptor_handler.accepted_socket()); auto accepted_socket_handler(acceptor_handler.accepted_handler()); char read_buf[7] = {0}; accepted_socket_handler->expect_read(sizeof(read_buf)); accepted_socket->async_read(gu::AsioMutableBuffer( read_buf, sizeof(read_buf)), accepted_socket_handler); while (accepted_socket_handler->bytes_read() != strlen(hdr) + strlen(data)) { io_service.run_one(); } 
ck_assert(strncmp(read_buf, "hdrdata", sizeof(read_buf)) == 0); } START_TEST(test_tcp_async_read_write) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_common(io_service, *acceptor, *acceptor_handler); } END_TEST template void test_async_read_write_large_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); wait_handshake_ready(io_service, acceptor_handler, *handler); const char* hdr("hdr"); gu::Buffer data(1 << 23); std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data.data(), data.size()); socket->async_write(cbs, handler); auto accepted_socket(acceptor_handler.accepted_socket()); auto accepted_socket_handler(acceptor_handler.accepted_handler()); gu::Buffer read_buf(3 + data.size()); accepted_socket_handler->expect_read(read_buf.size()); accepted_socket->async_read(gu::AsioMutableBuffer( &read_buf[0], read_buf.size()), accepted_socket_handler); while (handler->bytes_written() != 3 + data.size() && accepted_socket_handler->bytes_read() != read_buf.size()) { io_service.run_one(); } } START_TEST(test_tcp_async_read_write_large) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_large_common(io_service, *acceptor, *acceptor_handler); } END_TEST template void test_async_read_write_small_large_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& 
acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); mark_point(); wait_handshake_ready(io_service, acceptor_handler, *handler); const char* hdr("hdr"); gu::Buffer data(10); const size_t small_message_size(3 + data.size()); std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data.data(), data.size()); socket->async_write(cbs, handler); mark_point(); size_t tot_bytes_written(small_message_size); while (handler->bytes_written() != tot_bytes_written) { io_service.run_one(); } data.resize(1 << 16); const size_t large_message_size(3 + data.size()); cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data.data(), data.size()); socket->async_write(cbs, handler); mark_point(); tot_bytes_written += large_message_size; while (handler->bytes_written() != tot_bytes_written) { io_service.run_one(); } auto accepted_socket(acceptor_handler.accepted_socket()); auto accepted_socket_handler(acceptor_handler.accepted_handler()); // Read buffer with size to hold one message at the time. This will // cause partial read to happen and async_read() needs to be called // twice to transfer all. gu::Buffer read_buf(large_message_size); accepted_socket_handler->expect_read(small_message_size); accepted_socket->async_read(gu::AsioMutableBuffer( &read_buf[0], read_buf.size()), accepted_socket_handler); mark_point(); while (accepted_socket_handler->bytes_read() < small_message_size) { io_service.run_one(); } ck_assert(::memcmp(read_buf.data(), "hdr", 3) == 0); // Consume the first message from the buffer and restart read. 
memmove(&read_buf[0], &read_buf[0] + small_message_size, accepted_socket_handler->bytes_read() - small_message_size); accepted_socket_handler->consume(small_message_size); accepted_socket_handler->expect_read(large_message_size); accepted_socket->async_read( gu::AsioMutableBuffer( &read_buf[0] + accepted_socket_handler->bytes_read(), read_buf.size() - accepted_socket_handler->bytes_read()), accepted_socket_handler); mark_point(); while (accepted_socket_handler->bytes_read() != large_message_size) { io_service.run_one(); } assert(::memcmp(read_buf.data(), "hdr", 3) == 0); } START_TEST(test_tcp_async_read_write_small_large) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_small_large_common(io_service, *acceptor, *acceptor_handler); } END_TEST static void test_async_read_from_client_write_from_server_common( gu::AsioIoService& io_service, gu::AsioAcceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); wait_handshake_ready(io_service, acceptor_handler, *handler); const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); auto accepted_socket(acceptor_handler.accepted_socket()); auto accepted_socket_handler(acceptor_handler.accepted_handler()); accepted_socket->async_write(cbs, accepted_socket_handler); while (accepted_socket_handler->bytes_written() != strlen(hdr) + strlen(data)) { io_service.run_one(); } char read_buf[7] = {0}; handler->expect_read(sizeof(read_buf)); socket->async_read(gu::AsioMutableBuffer(read_buf, sizeof(read_buf)), handler); while (handler->bytes_read() != 
strlen(hdr) + strlen(data)) { io_service.run_one(); } ck_assert(strncmp(read_buf, "hdrdata", sizeof(read_buf)) == 0); } START_TEST(test_tcp_async_read_from_client_write_from_server) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_from_client_write_from_server_common(io_service, *acceptor, *acceptor_handler); } END_TEST template void test_write_twice_wo_handling_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); while (not (acceptor_handler.accepted_socket() && handler->connect_handler_called())) { io_service.run_one(); } const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); socket->async_write(cbs, handler); try { socket->async_write(cbs, handler); ck_abort_msg("Exception not thrown"); } catch (const gu::Exception& e) { ck_assert(e.get_errno() == EBUSY); } } // Verify that trying to write twice without waiting for // write handler to be called will throw error. 
START_TEST(test_tcp_write_twice_wo_handling) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_write_twice_wo_handling_common(io_service, *acceptor, *acceptor_handler); } END_TEST void test_close_client_common(gu::AsioIoService& io_service, gu::AsioAcceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); wait_handshake_ready(io_service, acceptor_handler, *handler); socket->close(); char readbuf[1]; acceptor_handler.accepted_socket()->async_read( gu::AsioMutableBuffer(readbuf, 1), acceptor_handler.accepted_handler()); // Wait until socket closes. while (not acceptor_handler.accepted_handler()->last_error_code()) { io_service.run_one(); } } START_TEST(test_tcp_close_client) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_close_client_common(io_service, *acceptor, *acceptor_handler); } END_TEST void test_close_server_common(gu::AsioIoService& io_service, gu::AsioAcceptor& acceptor, MockAcceptorHandler& acceptor_handler) { auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); while (not (acceptor_handler.accepted_socket() && handler->connect_handler_called())) { io_service.run_one(); } acceptor_handler.accepted_socket()->close(); char readbuf[1]; socket->async_read(gu::AsioMutableBuffer(readbuf, 1), handler); // Wait until socket closes. 
while (not handler->last_error_code()) { io_service.run_one(); } } START_TEST(test_tcp_close_server) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_close_server_common(io_service, *acceptor, *acceptor_handler); } END_TEST template void test_get_tcp_info_common(gu::AsioIoService& io_service, Acceptor& acceptor, MockAcceptorHandler& acceptor_handler) { // Make first socket connected auto handler(std::make_shared()); auto socket(io_service.make_socket(acceptor.listen_addr())); socket->async_connect(acceptor.listen_addr(), handler); while (not (acceptor_handler.accepted_socket() && handler->connect_handler_called())) { io_service.run_one(); } (void)socket->get_tcp_info(); } START_TEST(test_tcp_get_tcp_info) { gu::AsioIoService io_service; gu::URI uri("tcp://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_get_tcp_info_common(io_service, *acceptor, *acceptor_handler); } END_TEST #ifdef GALERA_HAVE_SSL #include #include #include #include #include #include #include #include #include #include #include #include static std::string get_cert_dir() { assert(::strlen(GU_ASIO_TEST_CERT_DIR) > 0); const std::string ret{ GU_ASIO_TEST_CERT_DIR }; auto* dir = opendir(ret.c_str()); if (!dir) { if (mkdir(ret.c_str(), S_IRWXU)) { const auto* errstr = ::strerror(errno); gu_throw_fatal << "Could not create dir " << ret << ": " << errstr; } } else { closedir(dir); } return GU_ASIO_TEST_CERT_DIR; } static int password_cb(char*, int, int, void*) { return 0; } static void throw_error(const char* msg) { gu_throw_fatal << msg << ": " << ERR_error_string(ERR_get_error(), nullptr); } static EVP_PKEY* create_key() { #if 
OPENSSL_VERSION_MAJOR < 3 auto* bn = BN_new(); if (!bn) { throw_error("could not create BN"); } BN_set_word(bn, 0x10001); auto* rsa = RSA_new(); if (!rsa) { BN_free(bn); throw_error("could not create RSA"); } RSA_generate_key_ex(rsa, 2048, bn, nullptr); auto* pkey = EVP_PKEY_new(); if (!pkey) { BN_free(bn); RSA_free(rsa); throw_error("could not create PKEY"); } EVP_PKEY_set1_RSA(pkey, rsa); RSA_free(rsa); BN_free(bn); return pkey; #else auto* ret = EVP_RSA_gen(2048); if (!ret) { throw_error("could not create RSA"); } return ret; #endif /* OPENSSL_VERSION_MAJOR < 3 */ } static FILE* open_file(const std::string& path, const char* mode) { auto* ret = fopen(path.c_str(), mode); if (!ret) { const auto* errstr = ::strerror(errno); gu_throw_fatal << "Could not open file " << path << ": " << errstr; } return ret; } static void write_key(EVP_PKEY* pkey, const std::string& filename) { const std::string cert_dir = get_cert_dir(); const std::string key_file_path = cert_dir + "/" + filename; auto* key_file = open_file(key_file_path, "wb"); if (!PEM_write_PrivateKey(key_file, pkey, nullptr, nullptr, 0, password_cb, nullptr)) { throw_error("Could not write key"); } fclose(key_file); } static void set_x509v3_extensions(X509* x509, X509* issuer, bool const is_ca) { auto* conf_bio = BIO_new(BIO_s_mem()); std::string ext{ "[extensions]\n" "authorityKeyIdentifier=keyid,issuer\n" "subjectKeyIdentifier=hash\n" }; if (is_ca) { ext += "basicConstraints=critical,CA:TRUE\n"; } else { ext += "keyUsage=digitalSignature,keyEncipherment\n"; ext += "basicConstraints=CA:FALSE\n"; } BIO_printf(conf_bio, "%s", ext.c_str()); auto* conf = NCONF_new(nullptr); long errorline = -1; int err; if ((err = NCONF_load_bio(conf, conf_bio, &errorline)) <= 0) { gu_throw_fatal << "Could not load conf: " << err; } if (errorline != -1) { gu_throw_fatal << "Could not load conf, errorline: " << errorline; } // TODO: V3 extensions X509V3_CTX ctx; X509V3_set_ctx(&ctx, issuer ? 
issuer : x509, x509, nullptr, nullptr, 0); X509V3_set_nconf(&ctx, conf); char extensions[16]; ::strncpy(extensions, "extensions", sizeof(extensions)); if (!X509V3_EXT_add_nconf(conf, &ctx, extensions, x509)) { throw_error("Could not add extension"); } NCONF_free(conf); BIO_free(conf_bio); } static X509* create_x509(EVP_PKEY* pkey, X509* issuer, const char* cn, bool const is_ca) { auto* x509 = X509_new(); /* According to standard, value 2 means version 3. */ X509_set_version(x509, 2); ASN1_INTEGER_set(X509_get_serialNumber(x509), 1); X509_gmtime_adj(X509_get_notBefore(x509), 0); X509_gmtime_adj(X509_get_notAfter(x509), 31536000L); X509_set_pubkey(x509, pkey); auto* name = X509_get_subject_name(x509); static const unsigned char C_str [] = "FI"; static const unsigned char ST_str[] = "Uusimaa"; static const unsigned char L_str [] = "Helsinki"; static const unsigned char O_str [] = "Codership"; static const unsigned char OU_str[] = "Galera Devel"; X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, C_str, -1, -1, 0); X509_NAME_add_entry_by_txt(name, "ST", MBSTRING_ASC, ST_str, -1, -1, 0); X509_NAME_add_entry_by_txt(name, "L", MBSTRING_ASC, L_str, -1, -1, 0); X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC, O_str, -1, -1, 0); X509_NAME_add_entry_by_txt(name, "OU", MBSTRING_ASC, OU_str, -1, -1, 0); X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC, reinterpret_cast(cn), -1, -1, 0); if (!issuer) { /* Self signed */ X509_set_issuer_name(x509, name); } else { X509_set_issuer_name(x509, X509_get_subject_name(issuer)); } set_x509v3_extensions(x509, issuer, is_ca); X509_sign(x509, pkey, EVP_sha256()); return x509; } static void write_x509(X509* x509, const std::string& filename) { const std::string cert_dir = get_cert_dir(); const std::string file_path = cert_dir + "/" + filename; auto* file = open_file(file_path, "wb"); if (!PEM_write_X509(file, x509)) { throw_error("Could not write x509"); } fclose(file); } static void write_x509_list(const std::vector& certs, const 
std::string& filename) { const std::string cert_dir = get_cert_dir(); const std::string file_path = cert_dir + "/" + filename; auto* file = open_file(file_path, "wb"); for (auto* x509 : certs) { if (!PEM_write_X509(file, x509)) { throw_error("Could not write x509"); } } fclose(file); } /* Self signed CA + certificate */ static void generate_self_signed() { auto* pkey = create_key(); write_key(pkey, "galera_key.pem"); auto* ca = create_x509(pkey, nullptr, "Galera Root", true); write_x509(ca, "galera_ca.pem"); auto* cert = create_x509(pkey, ca, "Galera Cert", false); write_x509(cert, "galera_cert.pem"); X509_free(cert); X509_free(ca); EVP_PKEY_free(pkey); } /* ---- Server cert 1 / Root CA - Intermediate CA \---- Server cert 2 Two bundles consisting of intermediate CA and server certificate are created for servers 1 and 2. */ static void generate_self_signed_chains() { auto* sign_key = create_key(); auto* root_ca = create_x509(sign_key, nullptr, "Galera Root CA", true); auto* int_ca = create_x509(sign_key, root_ca, "Galera Intermediate CA", true); auto* server_1_cert = create_x509(sign_key, int_ca, "Galera Server 1", false); auto* server_2_cert = create_x509(sign_key, int_ca, "Galera Server 2", false); write_x509(root_ca, "galera-ca.pem"); write_key(sign_key, "galera-server-1.key"); write_x509_list({ server_1_cert, int_ca }, "bundle-galera-server-1.pem"); write_key(sign_key, "galera-server-2.key"); write_x509_list({ server_2_cert, int_ca }, "bundle-galera-server-2.pem"); X509_free(server_2_cert); X509_free(server_1_cert); X509_free(int_ca); X509_free(root_ca); EVP_PKEY_free(sign_key); } static void generate_certificates() { #if OPENSSL_VERSION_NUMBER < 0x30004000L #ifdef OPENSSL_INIT_LOAD_SSL_STRINGS OPENSSL_init_ssl(OPENSSL_INIT_LOAD_SSL_STRINGS, NULL); #endif #endif generate_self_signed(); generate_self_signed_chains(); } // // SSL // static gu::Config get_ssl_config() { gu::Config ret; gu::ssl_register_params(ret); std::string cert_dir(get_cert_dir()); 
ret.set(gu::conf::use_ssl, "1"); ret.set(gu::conf::ssl_key, cert_dir + "/galera_key.pem"); ret.set(gu::conf::ssl_cert, cert_dir + "/galera_cert.pem"); ret.set(gu::conf::ssl_ca, cert_dir + "/galera_ca.pem"); gu::ssl_init_options(ret); // Block SIGPIPE in SSL tests. OpenSSL calls may cause // signal to be generated. struct sigaction sa; ::memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigaction(SIGPIPE, &sa, 0); return ret; } START_TEST(test_ssl_io_service) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); } END_TEST START_TEST(test_ssl_socket) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); gu::URI uri("ssl://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); } END_TEST START_TEST(test_ssl_socket_receive_buffer_unopened) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); gu::URI uri("ssl://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_receive_buffer_size_unopened_common(*socket); } END_TEST START_TEST(test_ssl_socket_receive_buffer_size) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); gu::URI uri("ssl://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_receive_buffer_size_common(*socket, uri); } END_TEST START_TEST(test_ssl_socket_send_buffer_unopened) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); gu::URI uri("ssl://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_send_buffer_size_unopened_common(*socket); } END_TEST START_TEST(test_ssl_socket_send_buffer_size) { auto conf(get_ssl_config()); gu::AsioIoService io_service(conf); gu::URI uri("ssl://127.0.0.1:0"); auto socket(io_service.make_socket(uri)); test_socket_send_buffer_size_common(*socket, uri); } END_TEST START_TEST(test_ssl_acceptor) { gu::AsioIoService io_service; gu::URI uri("ssl://127.0.0.1:0"); auto acceptor(io_service.make_acceptor(uri)); } END_TEST START_TEST(test_ssl_connect) { gu::AsioIoService io_service(get_ssl_config()); gu::URI 
uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_connect_twice) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); acceptor_handler->next_stream_engine = nullptr; auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); acceptor_handler->reset(); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_connect_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_async_read_write) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_async_read_write_large) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_large_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_async_read_write_small_large) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, 
acceptor_handler->next_socket_handler); test_async_read_write_small_large_common( io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_async_read_from_client_write_from_server) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_from_client_write_from_server_common( io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_write_twice_wo_handling) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_write_twice_wo_handling_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_close_client) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_close_client_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_close_server) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_close_server_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_get_tcp_info) { gu::AsioIoService io_service(get_ssl_config()); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); 
acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_get_tcp_info_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_compression_option) { auto config(get_ssl_config()); config.set("socket.ssl_compression", true); gu::AsioIoService io_service(config); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_common(io_service, *acceptor, *acceptor_handler); } END_TEST START_TEST(test_ssl_cipher) { auto config(get_ssl_config()); config.set("socket.ssl_cipher", "AES256-SHA"); gu::AsioIoService io_service(config); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor_handler(std::make_shared()); auto acceptor(io_service.make_acceptor(uri)); acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); test_async_read_write_common(io_service, *acceptor, *acceptor_handler); } END_TEST static gu::Config get_ssl_chain_config(int index) { gu::Config ret; gu::ssl_register_params(ret); std::string cert_dir(get_cert_dir()); ret.set(gu::conf::use_ssl, "1"); ret.set(gu::conf::ssl_key, cert_dir + "/galera-server-" + gu::to_string(index) + ".key"); ret.set(gu::conf::ssl_cert, cert_dir + "/bundle-galera-server-" + gu::to_string(index) + ".pem"); ret.set(gu::conf::ssl_ca, cert_dir + "/galera-ca.pem"); gu::ssl_init_options(ret); // Block SIGPIPE in SSL tests. OpenSSL calls may cause // signal to be generated. 
struct sigaction sa; ::memset(&sa, 0, sizeof(sa)); sa.sa_handler = SIG_IGN; sigaction(SIGPIPE, &sa, 0); return ret; } START_TEST(test_ssl_certificate_chain) { auto client_conf(get_ssl_chain_config(1)); gu::AsioIoService client_io_service(client_conf); auto server_conf(get_ssl_chain_config(2)); gu::AsioIoService server_io_service(server_conf); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor(server_io_service.make_acceptor(uri)); acceptor->listen(uri); auto acceptor_handler(std::make_shared()); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); auto handler(std::make_shared("client")); auto socket(client_io_service.make_socket(acceptor->listen_addr())); socket->async_connect(acceptor->listen_addr(), handler); client_io_service.run_one(); // Process async connect server_io_service.run_one(); // Accept client_io_service.run_one(); // Client hello client_io_service.run_one(); // Client hello IO completion while ( not(handler->connect_handler_called() && acceptor_handler->accepted_handler()->connect_handler_called())) { client_io_service.poll_one(); server_io_service.poll_one(); } ck_assert(!handler->last_error_code()); } END_TEST // This test uses certificate chain for server and self signed // certificate for client. They do not have common trusted CA, // so the connection should be rejected. 
START_TEST(test_ssl_invalid_cert) { auto client_conf(get_ssl_config()); gu::AsioIoService client_io_service(client_conf); auto server_conf(get_ssl_chain_config(2)); gu::AsioIoService server_io_service(server_conf); gu::URI uri("ssl://127.0.0.1:0"); auto acceptor(server_io_service.make_acceptor(uri)); acceptor->listen(uri); auto acceptor_handler(std::make_shared()); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler); auto handler(std::make_shared()); auto socket(client_io_service.make_socket(acceptor->listen_addr())); socket->async_connect(acceptor->listen_addr(), handler); client_io_service.run_one(); // Process async connect server_io_service.run_one(); // Accept client_io_service.run_one(); // Client hello client_io_service.run_one(); // Client hello IO completion // server_io_service.run_one(); // Server handles while (not handler->last_error_code()) { client_io_service.poll_one(); server_io_service.poll_one(); } ck_assert_msg(handler->last_error_code().message().find( "unable to get local issuer certificate") != std::string::npos, "verify error 'unable to get local issuer certificate' " "not found from '%s'", handler->last_error_code().message().c_str()); } END_TEST #endif // GALERA_HAVE_SSL // // Wsrep TLS service. 
// struct TlsServiceClientTestFixture { gu::AsioIoService server_io_service; std::shared_ptr client_engine; std::shared_ptr server_engine; gu::AsioIoService client_io_service; gu::URI uri; std::shared_ptr acceptor; std::shared_ptr acceptor_handler; std::shared_ptr socket; std::shared_ptr socket_handler; TlsServiceClientTestFixture() : server_io_service() , client_engine(std::make_shared()) , server_engine(std::make_shared()) , client_io_service(gu::Config()) , uri("tcp://127.0.0.1:0") , acceptor(server_io_service.make_acceptor(uri)) , acceptor_handler(std::make_shared()) , socket(client_io_service.make_socket(uri, client_engine)) , socket_handler(std::make_shared()) { acceptor->listen(uri); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler, server_engine); socket->async_connect(acceptor->listen_addr(), socket_handler); while (not( acceptor_handler->accepted_socket() && acceptor_handler->accepted_handler()->connect_handler_called())) { server_io_service.run_one(); } } void run_client_while(const std::function& pred) { while (pred()) { client_io_service.run_one(); } } }; START_TEST(test_client_handshake_want_read) { TlsServiceClientTestFixture f; f.client_engine->next_result = gu::AsioStreamEngine::want_read; // Write to accepted socket to make connected socket readable std::array cbs; cbs[0] = gu::AsioConstBuffer("serv", 4); cbs[1] = gu::AsioConstBuffer(); f.acceptor_handler->accepted_socket()->async_write( cbs, f.acceptor_handler->accepted_handler()); f.server_io_service.run_one(); f.run_client_while( [&f] { return f.client_engine->count_client_handshake_called < 2; }); } END_TEST START_TEST(test_client_handshake_want_write) { TlsServiceClientTestFixture f; f.client_engine->next_result = gu::AsioStreamEngine::want_write; f.run_client_while( [&f] { return f.client_engine->count_client_handshake_called < 2; }); } END_TEST START_TEST(test_client_handshake_eof) { TlsServiceClientTestFixture f; f.client_engine->next_result = 
gu::AsioStreamEngine::eof; f.client_io_service.run_one(); f.client_io_service.run_one(); // IO completion ck_assert(f.socket_handler->connect_handler_called()); ck_assert(f.socket_handler->last_error_code().is_eof()); ck_assert(f.client_engine->count_client_handshake_called == 1); } END_TEST START_TEST(test_client_handshake_eof2) { TlsServiceClientTestFixture f; // First op causes connect handler to restart client handshake // call. The EOF will now returned in client handshake handler. f.client_engine->next_result = gu::AsioStreamEngine::want_write; f.client_io_service.run_one(); f.client_io_service.run_one(); // IO completion f.client_engine->next_result = gu::AsioStreamEngine::eof; f.client_io_service.run_one(); ck_assert(f.socket_handler->connect_handler_called()); ck_assert(f.socket_handler->last_error_code().is_eof()); ck_assert(f.client_engine->count_client_handshake_called == 2); } END_TEST START_TEST(test_client_handshake_error) { TlsServiceClientTestFixture f; f.client_engine->next_result = gu::AsioStreamEngine::error; f.client_engine->next_error = EPIPE; f.client_io_service.run_one(); f.client_io_service.run_one(); // IO completion ck_assert(f.socket_handler->connect_handler_called()); ck_assert(f.socket_handler->last_error_code().value() == EPIPE); ck_assert(f.client_engine->count_client_handshake_called == 1); } END_TEST START_TEST(test_client_handshake_error2) { TlsServiceClientTestFixture f; // First op causes connect handler to restart client handshake // call. The error will now returned in client handshake handler. 
f.client_engine->next_result = gu::AsioStreamEngine::want_write; f.client_io_service.run_one(); f.client_io_service.run_one(); // IO completion f.client_engine->next_result = gu::AsioStreamEngine::error; f.client_engine->next_error = EPIPE; f.client_io_service.run_one(); ck_assert(f.socket_handler->connect_handler_called()); ck_assert(f.socket_handler->last_error_code().value() == EPIPE); ck_assert(f.client_engine->count_client_handshake_called == 2); } END_TEST struct TlsServiceServerTestFixture { gu::AsioIoService server_io_service; gu::AsioIoService client_io_service; gu::URI uri; std::shared_ptr acceptor; std::shared_ptr acceptor_handler; std::shared_ptr socket; std::shared_ptr socket_handler; TlsServiceServerTestFixture() : server_io_service(gu::Config()) , client_io_service() , uri("tcp://127.0.0.1:0") , acceptor(server_io_service.make_acceptor(uri)) , acceptor_handler(std::make_shared()) , socket() , socket_handler() { acceptor->listen(uri); /* Override stream engine for tests to be able to do error injection. */ acceptor_handler->next_stream_engine = std::make_shared(); acceptor->async_accept(acceptor_handler, acceptor_handler->next_socket_handler, acceptor_handler->next_stream_engine); run_async_connect(); } void run_async_connect() { socket_handler = nullptr; socket_handler = std::make_shared(); socket = client_io_service.make_socket( uri, std::make_shared()); socket->async_connect(acceptor->listen_addr(), socket_handler); client_io_service.run_one(); client_io_service.run_one(); // IO completion // client_io_service runs out of work. 
Reset to make // followig calls succeed client_io_service.reset(); } void complete_server_handshake() { server_io_service.run_one(); server_io_service.run_one(); } void run_server_while(const std::function& pred) { while (pred()) { server_io_service.run_one(); } } void run_client_while(const std::function& pred) { while (pred()) { client_io_service.run_one(); } } }; START_TEST(test_server_handshake_want_read) { TlsServiceServerTestFixture f; f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::want_read; f.run_server_while( [&f]() { return not f.acceptor_handler->cur_stream_engine || f.acceptor_handler->cur_stream_engine ->count_server_handshake_called < 1; }); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 1); // Write to connected socket to make accepted socket readable std::array cbs; cbs[0] = gu::AsioConstBuffer("clie", 4); cbs[1] = gu::AsioConstBuffer(); f.socket->async_write(cbs, f.socket_handler); f.run_client_while( [&f]() { return f.socket_handler->bytes_written() < 4; }); f.run_server_while( [&f]() { return f.acceptor_handler->cur_stream_engine->count_server_handshake_called < 2; }); } END_TEST START_TEST(test_server_handshake_want_write) { TlsServiceServerTestFixture f; f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::want_write; f.run_server_while( [&f]() { return not f.acceptor_handler->cur_stream_engine || f.acceptor_handler->cur_stream_engine ->count_server_handshake_called < 2; }); } END_TEST START_TEST(test_server_handshake_eof) { TlsServiceServerTestFixture f; f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::eof; f.server_io_service.run_one(); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 1); } END_TEST START_TEST(test_server_handshake_eof2) { TlsServiceServerTestFixture f; // First op causes accept handler to restart server handshake call. 
// The EOF will now handled in server handshake handler. f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::want_write; f.complete_server_handshake(); ck_assert(f.acceptor_handler->cur_stream_engine != nullptr); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::eof; f.server_io_service.run_one(); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 2); } END_TEST START_TEST(test_server_handshake_error) { TlsServiceServerTestFixture f; f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::error; f.acceptor_handler->next_stream_engine->next_error = EPIPE; f.complete_server_handshake(); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 1); } END_TEST START_TEST(test_server_handshake_error2) { TlsServiceServerTestFixture f; // First op causes accept handler to restart server handshake call. // The error will now handled in server handshake handler. 
f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::want_write; f.complete_server_handshake(); ck_assert(f.acceptor_handler->cur_stream_engine != nullptr); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::error; f.acceptor_handler->cur_stream_engine->next_error = EPIPE; f.server_io_service.run_one(); ck_assert_int_eq(f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 2); } END_TEST START_TEST(test_accept_after_server_handshake_error) { TlsServiceServerTestFixture f; f.acceptor_handler->next_stream_engine->next_result = gu::AsioStreamEngine::error; f.acceptor_handler->next_stream_engine->next_error = EPIPE; f.complete_server_handshake(); ck_assert(f.acceptor_handler->cur_stream_engine != nullptr); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 1); f.acceptor_handler->cur_stream_engine->next_error = 0; f.run_async_connect(); f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != nullptr); ck_assert_int_eq( f.acceptor_handler->cur_stream_engine->count_server_handshake_called, 1); } END_TEST START_TEST(test_read_want_read) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.socket->async_write(cbs, f.socket_handler); f.client_io_service.run_one(); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::want_read; std::array buf; f.acceptor_handler->accepted_socket()->async_read( gu::AsioMutableBuffer(buf.data(), buf.size()), f.acceptor_handler->accepted_handler()); f.run_server_while([&f]() { return f.acceptor_handler->cur_stream_engine->count_read_called < 1; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == 1); ck_assert(f.acceptor_handler->accepted_handler()->bytes_read() == 4); // Write socket to make accepted socket readable, but do 
not start // async read to simulate stream engine internal operation. f.socket->async_write(cbs, f.socket_handler); f.client_io_service.reset(); f.client_io_service.run_one(); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::success; const size_t expect_count_read_called = f.acceptor_handler->cur_stream_engine->count_read_called + 1; f.run_server_while( [&f, expect_count_read_called]() { return f.acceptor_handler->cur_stream_engine->count_read_called < expect_count_read_called; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == expect_count_read_called); // Extra read should just call read() but the communication should // be internal, the handler should not see received data. ck_assert(f.acceptor_handler->accepted_handler()->bytes_read() == 4); } END_TEST START_TEST(test_read_want_write) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.socket->async_write(cbs, f.socket_handler); f.client_io_service.run_one(); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::want_write; std::array buf; f.acceptor_handler->accepted_socket()->async_read( gu::AsioMutableBuffer(buf.data(), buf.size()), f.acceptor_handler->accepted_handler()); const size_t expect_count_read_called = f.acceptor_handler->cur_stream_engine->count_read_called + 1; f.run_server_while( [&f, expect_count_read_called]() { return f.acceptor_handler->cur_stream_engine->count_read_called < expect_count_read_called; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == expect_count_read_called); ck_assert(f.acceptor_handler->accepted_handler()->bytes_read() == 4); f.run_server_while( [&f, expect_count_read_called]() { return f.acceptor_handler->cur_stream_engine->count_read_called < expect_count_read_called + 1; }); // The result want_write means that the previous 
operation // (in this case read) must be called once again once the // socket becomes writable. ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == expect_count_read_called + 1); } END_TEST START_TEST(test_read_eof) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); f.socket->close(); std::array buf; f.acceptor_handler->accepted_socket()->async_read( gu::AsioMutableBuffer(buf.data(), buf.size()), f.acceptor_handler->accepted_handler()); f.run_server_while( [&f]() { return f.acceptor_handler->cur_stream_engine->count_read_called < 1; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == 1); ck_assert( f.acceptor_handler->accepted_handler()->last_error_code().is_eof()); } END_TEST START_TEST(test_read_error) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); // Socket close makes the socket readable, but we override // the return value with error. 
f.socket->close(); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::error; f.acceptor_handler->cur_stream_engine->next_error = EPIPE; std::array buf; f.acceptor_handler->accepted_socket()->async_read( gu::AsioMutableBuffer(buf.data(), buf.size()), f.acceptor_handler->accepted_handler()); f.run_server_while( [&f]() { return f.acceptor_handler->cur_stream_engine->count_read_called < 1; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_read_called == 1); ck_assert(f.acceptor_handler->accepted_handler()->last_error_code().value() == EPIPE); } END_TEST START_TEST(test_write_want_read) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::want_read; std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.acceptor_handler->accepted_socket()->async_write( cbs, f.acceptor_handler->accepted_handler()); f.server_io_service.run_one(); ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 1); // Write to client socket to make server side socket readable f.socket->async_write(cbs, f.socket_handler); f.client_io_service.reset(); f.client_io_service.run_one(); ck_assert(f.socket_handler->bytes_written() == 4); // Now the server side socket should become readable and // the second call to write should happen. 
f.run_server_while( [&]() { return f.acceptor_handler->cur_stream_engine->count_write_called < 2; }); ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 2); } END_TEST START_TEST(test_write_want_write) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::want_write; std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.acceptor_handler->accepted_socket()->async_write( cbs, f.acceptor_handler->accepted_handler()); f.server_io_service.run_one(); ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 1); // Now the server side socket should remain writable and the // the second call to write should happen. f.run_server_while( [&f]() { return f.acceptor_handler->cur_stream_engine->count_write_called < 2; }); ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 2); } END_TEST START_TEST(test_write_eof) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::want_read; std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.acceptor_handler->accepted_socket()->async_write( cbs, f.acceptor_handler->accepted_handler()); f.server_io_service.run_one(); ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 1); // Write to client socket to make server side socket readable f.socket->async_write(cbs, f.socket_handler); f.client_io_service.reset(); f.client_io_service.run_one(); 
ck_assert(f.socket_handler->bytes_written() == 4); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::eof; f.run_server_while( [&f] { return f.acceptor_handler->cur_stream_engine->count_write_called < 2; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 2); ck_assert( f.acceptor_handler->accepted_handler()->last_error_code().is_eof()); } END_TEST START_TEST(test_write_error) { TlsServiceServerTestFixture f; f.complete_server_handshake(); ck_assert(f.acceptor_handler->accepted_socket() != 0); f.acceptor_handler->cur_stream_engine->next_result = gu::AsioStreamEngine::error; f.acceptor_handler->cur_stream_engine->next_error = EPIPE; std::array cbs; cbs[0] = gu::AsioConstBuffer("writ", 4); cbs[1] = gu::AsioConstBuffer(); f.acceptor_handler->accepted_socket()->async_write( cbs, f.acceptor_handler->accepted_handler()); f.run_server_while( [&f] { return f.acceptor_handler->cur_stream_engine->count_write_called < 1; }); ck_assert(f.acceptor_handler->cur_stream_engine->count_write_called == 1); // Write will succeed before the error is injected, so there will be // some bytes written. ck_assert(f.acceptor_handler->accepted_handler()->bytes_written() == 4); ck_assert(f.acceptor_handler->accepted_handler()->last_error_code().value() == EPIPE); } END_TEST // // Datagram // /* Helper to determine if UDP sockets can be opened. */ static bool have_datagram() try { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); return true; } catch (...) 
{ return false; } class MockDatagramSocketHandler : public gu::AsioDatagramSocketHandler { public: MockDatagramSocketHandler() : gu::AsioDatagramSocketHandler() , bytes_read_() { } virtual void read_handler(gu::AsioDatagramSocket&, const gu::AsioErrorCode&, size_t bytes_transferred) GALERA_OVERRIDE { bytes_read_ += bytes_transferred; } size_t bytes_read() const { return bytes_read_; } private: size_t bytes_read_; }; START_TEST(test_datagram_socket) { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); } END_TEST START_TEST(test_datagram_open) { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); } END_TEST START_TEST(test_datagram_connect) { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); socket->connect(uri); } END_TEST START_TEST(test_datagram_open_connect) { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); socket->connect(uri); } END_TEST START_TEST(test_datagram_connect_multicast) { gu::AsioIoService io_service; gu::URI uri("udp://239.255.0.1:0"); auto socket(io_service.make_datagram_socket(uri)); socket->connect(uri); gu::URI bound_uri(socket->local_addr()); auto bound_addr(gu::make_address(bound_uri.get_host())); ck_assert(bound_addr.is_v4()); ck_assert_msg(bound_addr.to_v4().is_multicast(), "not datagram: %s", bound_uri.to_string().c_str()); } END_TEST START_TEST(test_datagram_connect_multicast_local_if) { gu::AsioIoService io_service; gu::URI uri("udp://239.255.0.1:0?socket.if_addr=127.0.0.1"); auto socket(io_service.make_datagram_socket(uri)); socket->connect(uri); gu::URI bound_uri(socket->local_addr()); auto bound_addr(gu::make_address(bound_uri.get_host())); ck_assert(bound_addr.is_v4()); ck_assert_msg(bound_addr.to_v4().is_multicast(), "not datagram: %s", 
bound_uri.to_string().c_str()); } END_TEST void test_datagram_send_to_and_async_read_common( gu::AsioIoService& io_service, gu::AsioDatagramSocket& socket, const std::shared_ptr& handler) { gu::URI local_uri(socket.local_addr()); const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); gu::URI udp_uri("udp://127.0.0.1:0?socket.if_addr=127.0.0.1"); auto sender_socket(io_service.make_datagram_socket(udp_uri)); sender_socket->connect(udp_uri); sender_socket->send_to(cbs, gu::make_address(local_uri.get_host()), gu::from_string( local_uri.get_port())); char read_buf[7]; socket.async_read(gu::AsioMutableBuffer(read_buf, sizeof(read_buf)), handler); while (handler->bytes_read() != sizeof(read_buf)) { io_service.run_one(); } } START_TEST(test_datagram_send_to_and_async_read) { gu::AsioIoService io_service; gu::URI uri("udp://127.0.0.1:0"); auto handler(std::make_shared()); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); socket->connect(uri); test_datagram_send_to_and_async_read_common(io_service, *socket, handler); } END_TEST START_TEST(test_datagram_send_to_and_async_read_multicast) { gu::AsioIoService io_service; gu::URI uri("udp://239.255.0.1:0?socket.if_addr=127.0.0.1"); auto handler(std::make_shared()); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); socket->connect(uri); test_datagram_send_to_and_async_read_common(io_service, *socket, handler); } END_TEST START_TEST(test_datagram_write_multicast) { gu::AsioIoService io_service; gu::URI uri("udp://239.255.0.1:0?socket.if_addr=127.0.0.1"); auto socket(io_service.make_datagram_socket(uri)); socket->open(uri); socket->connect(uri); const char* hdr = "hdr"; const char* data = "data"; std::array cbs; cbs[0] = gu::AsioConstBuffer(hdr, strlen(hdr)); cbs[1] = gu::AsioConstBuffer(data, strlen(data)); socket->write(cbs); } END_TEST // // Steady timer // class 
MockSteadyTimerHandler : public gu::AsioSteadyTimerHandler { public: MockSteadyTimerHandler() : gu::AsioSteadyTimerHandler() , called_() { } void handle_wait(const gu::AsioErrorCode&) { called_ = true; } bool called() const { return called_; } private: bool called_; }; START_TEST(test_steady_timer) { gu::AsioIoService io_service; auto handler(std::make_shared()); gu::AsioSteadyTimer timer(io_service); timer.expires_from_now(std::chrono::milliseconds(50)); timer.async_wait(handler); #ifdef TEST_STREADY_TIMER_CHECK_DURATION auto start(std::chrono::steady_clock::now()); #endif io_service.run_one(); #ifdef TEST_STREADY_TIMER_CHECK_DURATION auto stop(std::chrono::steady_clock::now()); #endif ck_assert(handler->called()); #ifdef TEST_STREADY_TIMER_CHECK_DURATION // Don't check duration by default. The operation sometimes take less than // 50msec for some reason. ck_assert( std::chrono::duration_cast(stop - start) >= std::chrono::milliseconds(50), "Timer duration less than 50 milliseconds %zu", std::chrono::duration_cast(stop - start) .count()); #endif } END_TEST Suite* gu_asio_suite() { Suite* s(suite_create("gu::asio")); TCase* tc; tc = tcase_create("test_make_address_v4"); tcase_add_test(tc, test_make_address_v4); suite_add_tcase(s, tc); tc = tcase_create("test_make_address_v6_link_local"); tcase_add_test(tc, test_make_address_v6_link_local); suite_add_tcase(s, tc); tc = tcase_create("test_make_address_v6_link_local_with_scope_id"); tcase_add_test(tc, test_make_address_v6_link_local_with_scope_id); suite_add_tcase(s, tc); tc = tcase_create("test_error_code_success"); tcase_add_test(tc, test_error_code_success); suite_add_tcase(s, tc); tc = tcase_create("test_error_code_error"); tcase_add_test(tc, test_error_code_error); suite_add_tcase(s, tc); tc = tcase_create("test_io_service"); tcase_add_test(tc, test_io_service); suite_add_tcase(s, tc); tc = tcase_create("test_const_buffer"); tcase_add_test(tc, test_const_buffer); suite_add_tcase(s, tc); // // TCP // tc = 
tcase_create("test_tcp_socket"); tcase_add_test(tc, test_tcp_socket); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_socket_receive_buffer_size_unopened"); tcase_add_test(tc, test_tcp_socket_receive_buffer_size_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_socket_receive_buffer_size"); tcase_add_test(tc, test_tcp_socket_receive_buffer_size); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_socket_send_buffer_size_unopened"); tcase_add_test(tc, test_tcp_socket_send_buffer_size_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_socket_send_buffer_size"); tcase_add_test(tc, test_tcp_socket_send_buffer_size); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_read_unopened"); tcase_add_test(tc, test_tcp_read_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_write_unopened"); tcase_add_test(tc, test_tcp_write_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor"); tcase_add_test(tc, test_tcp_acceptor); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_listen"); tcase_add_test(tc, test_tcp_acceptor_listen); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_listen_already_bound"); tcase_add_test(tc, test_tcp_acceptor_listen_already_bound); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_receive_buffer_size_unopened"); tcase_add_test(tc, test_tcp_acceptor_receive_buffer_size_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_receive_buffer_size"); tcase_add_test(tc, test_tcp_acceptor_receive_buffer_size); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_send_buffer_size_unopened"); tcase_add_test(tc, test_tcp_acceptor_send_buffer_size_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_acceptor_send_buffer_size"); tcase_add_test(tc, test_tcp_acceptor_send_buffer_size); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_connect"); tcase_add_test(tc, test_tcp_connect); suite_add_tcase(s, tc); tc = 
tcase_create("test_tcp_connect_twice"); tcase_add_test(tc, test_tcp_connect_twice); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_async_read_write"); tcase_add_test(tc, test_tcp_async_read_write); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_async_read_write_large"); tcase_add_test(tc, test_tcp_async_read_write_large); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_async_read_write_small_large"); tcase_add_test(tc, test_tcp_async_read_write_small_large); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_async_read_from_client_write_from_server"); tcase_add_test(tc, test_tcp_async_read_from_client_write_from_server); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_write_twice_wo_handling"); tcase_add_test(tc, test_tcp_write_twice_wo_handling); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_close_client"); tcase_add_test(tc, test_tcp_close_client); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_close_server"); tcase_add_test(tc, test_tcp_close_server); suite_add_tcase(s, tc); tc = tcase_create("test_tcp_get_tcp_info"); tcase_add_test(tc, test_tcp_get_tcp_info); suite_add_tcase(s, tc); #ifdef GALERA_HAVE_SSL // // SSL // generate_certificates(); tc = tcase_create("test_ssl_io_service"); tcase_add_test(tc, test_ssl_io_service); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_socket"); tcase_add_test(tc, test_ssl_socket); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_socket_receive_buffer_unopened"); tcase_add_test(tc, test_ssl_socket_receive_buffer_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_socket_receive_buffer_size"); tcase_add_test(tc, test_ssl_socket_receive_buffer_size); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_socket_send_buffer_unopened"); tcase_add_test(tc, test_ssl_socket_send_buffer_unopened); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_socket_send_buffer_size"); tcase_add_test(tc, test_ssl_socket_send_buffer_size); suite_add_tcase(s, tc); tc = 
tcase_create("test_ssl_acceptor"); tcase_add_test(tc, test_ssl_acceptor); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_connect"); tcase_add_test(tc, test_ssl_connect); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_connect_twice"); tcase_add_test(tc, test_ssl_connect_twice); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_async_read_write"); tcase_add_test(tc, test_ssl_async_read_write); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_async_read_write_large"); tcase_add_test(tc, test_ssl_async_read_write_large); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_async_read_write_small_large"); tcase_add_test(tc, test_ssl_async_read_write_small_large); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_async_read_from_client_write_from_server"); tcase_add_test(tc, test_ssl_async_read_from_client_write_from_server); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_write_twice_wo_handling"); tcase_add_test(tc, test_ssl_write_twice_wo_handling); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_close_client"); tcase_add_test(tc, test_ssl_close_client); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_close_server"); tcase_add_test(tc, test_ssl_close_server); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_get_tcp_info"); tcase_add_test(tc, test_ssl_get_tcp_info); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_compression_option"); tcase_add_test(tc, test_ssl_compression_option); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_cipher"); tcase_add_test(tc, test_ssl_cipher); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_certificate_chain"); tcase_add_test(tc, test_ssl_certificate_chain); suite_add_tcase(s, tc); tc = tcase_create("test_ssl_invalid_cert"); tcase_add_test(tc, test_ssl_invalid_cert); suite_add_tcase(s, tc); #endif // GALERA_HAVE_SSL tc = tcase_create("test_client_handshake_want_read"); tcase_add_test(tc, test_client_handshake_want_read); suite_add_tcase(s, tc); tc = 
tcase_create("test_client_handshake_want_write"); tcase_add_test(tc, test_client_handshake_want_write); suite_add_tcase(s, tc); tc = tcase_create("test_client_handshake_eof"); tcase_add_test(tc, test_client_handshake_eof); suite_add_tcase(s, tc); tc = tcase_create("test_client_handshake_eof2"); tcase_add_test(tc, test_client_handshake_eof2); suite_add_tcase(s, tc); tc = tcase_create("test_client_handshake_error"); tcase_add_test(tc, test_client_handshake_error); suite_add_tcase(s, tc); tc = tcase_create("test_client_handshake_error2"); tcase_add_test(tc, test_client_handshake_error2); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_want_read"); tcase_add_test(tc, test_server_handshake_want_read); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_want_write"); tcase_add_test(tc, test_server_handshake_want_write); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_eof"); tcase_add_test(tc, test_server_handshake_eof); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_eof2"); tcase_add_test(tc, test_server_handshake_eof2); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_error"); tcase_add_test(tc, test_server_handshake_error); suite_add_tcase(s, tc); tc = tcase_create("test_server_handshake_error2"); tcase_add_test(tc, test_server_handshake_error2); suite_add_tcase(s, tc); tc = tcase_create("test_accept_after_server_handshake_error"); tcase_add_test(tc, test_accept_after_server_handshake_error); suite_add_tcase(s, tc); tc = tcase_create("test_read_want_read"); tcase_add_test(tc, test_read_want_read); suite_add_tcase(s, tc); tc = tcase_create("test_read_want_write"); tcase_add_test(tc, test_read_want_write); suite_add_tcase(s, tc); tc = tcase_create("test_read_eof"); tcase_add_test(tc, test_read_eof); suite_add_tcase(s, tc); tc = tcase_create("test_read_error"); tcase_add_test(tc, test_read_error); suite_add_tcase(s, tc); tc = tcase_create("test_write_want_read"); tcase_add_test(tc, 
test_write_want_read); suite_add_tcase(s, tc); tc = tcase_create("test_write_want_write"); tcase_add_test(tc, test_write_want_write); suite_add_tcase(s, tc); tc = tcase_create("test_write_eof"); tcase_add_test(tc, test_write_eof); suite_add_tcase(s, tc); tc = tcase_create("test_write_error"); tcase_add_test(tc, test_write_error); suite_add_tcase(s, tc); // // Datagram // if (have_datagram()) { tc = tcase_create("test_datagram_socket"); tcase_add_test(tc, test_datagram_socket); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_open"); tcase_add_test(tc, test_datagram_open); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_connect"); tcase_add_test(tc, test_datagram_connect); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_open_connect"); tcase_add_test(tc, test_datagram_open_connect); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_send_to_and_async_read"); tcase_add_test(tc, test_datagram_send_to_and_async_read); suite_add_tcase(s, tc); } #if defined(GALERA_ASIO_TEST_MULTICAST) tc = tcase_create("test_datagram_connect_multicast"); tcase_add_test(tc, test_datagram_connect_multicast); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_connect_multicast_local_if"); tcase_add_test(tc, test_datagram_connect_multicast_local_if); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_send_to_and_async_read_multicast"); tcase_add_test(tc, test_datagram_send_to_and_async_read_multicast); suite_add_tcase(s, tc); tc = tcase_create("test_datagram_write_multicast"); tcase_add_test(tc, test_datagram_write_multicast); suite_add_tcase(s, tc); #else (void)test_datagram_connect_multicast; (void)test_datagram_connect_multicast_local_if; (void)test_datagram_send_to_and_async_read_multicast; (void)test_datagram_write_multicast; #endif /* GALERA_ASIO_TEST_MULTICAST */ // // Steady timer // tc = tcase_create("test_steady_timer"); tcase_add_test(tc, test_steady_timer); suite_add_tcase(s, tc); return s; } 
galera-4-26.4.25/galerautils/tests/gu_gtid_test.hpp000644 000164 177776 00000000273 15107057155 023366 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2015 Codership Oy #ifndef gu_gtid_test_hpp #define gu_gtid_test_hpp #include Suite* gu_gtid_suite(void); #endif /* gu_gtid_test_hpp */ galera-4-26.4.25/galerautils/tests/gu_dbug_test.c000644 000164 177776 00000003177 15107057155 023021 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2008-2017 Codership Oy // $Id$ /* Pthread yield */ #define _GNU_SOURCE 1 #include #include #include #include "gu_dbug_test.h" #include "../src/gu_dbug.h" #include "../src/gu_threads.h" static void cf() { GU_DBUG_ENTER("cf"); GU_DBUG_PRINT("galera", ("hello from cf")); sched_yield(); GU_DBUG_VOID_RETURN; } static void bf() { GU_DBUG_ENTER("bf"); GU_DBUG_PRINT("galera", ("hello from bf")); sched_yield(); cf(); GU_DBUG_VOID_RETURN; } static void af() { GU_DBUG_ENTER("af"); GU_DBUG_PRINT("galera", ("hello from af")); sched_yield(); bf(); GU_DBUG_VOID_RETURN; } static time_t stop = 0; static void *dbg_thr(void *arg) { while (time(NULL) < stop) { af(); } gu_thread_exit(NULL); } START_TEST(gu_dbug_test) { int i; #define N_THREADS 10 gu_thread_t th[N_THREADS]; /* Log > /dev/null */ GU_DBUG_FILE = fopen("/dev/null", "a+"); /* These should not produce output yet */ af(); af(); af(); /* Start logging */ GU_DBUG_PUSH("d:t:i"); GU_DBUG_PRINT("galera", ("Start logging")); af(); af(); af(); /* Run few threads concurrently */ stop = time(NULL) + 2; for (i = 0; i < N_THREADS; i++) gu_thread_create(&th[i], NULL, &dbg_thr, NULL); for (i = 0; i < N_THREADS; i++) gu_thread_join(th[i], NULL); } END_TEST Suite *gu_dbug_suite(void) { Suite *s = suite_create("Galera dbug functions"); TCase *tc = tcase_create("gu_dbug"); suite_add_tcase (s, tc); tcase_add_test (tc, gu_dbug_test); tcase_set_timeout(tc, 60); return s; } galera-4-26.4.25/galerautils/tests/gu_config_test.hpp000644 000164 177776 00000000323 15107057155 023700 0ustar00jenkinsnogroup000000 
000000 // Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_config_test__ #define __gu_config_test__ #include extern Suite *gu_config_suite(void); #endif /* __gu_config_test__ */ galera-4-26.4.25/galerautils/tests/gu_fifo_test.h000644 000164 177776 00000000260 15107057155 023016 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy // $Id$ #ifndef __gu_fifo_test__ #define __gu_fifo_test__ Suite *gu_fifo_suite(void); #endif /* __gu_fifo_test__ */ galera-4-26.4.25/galerautils/tests/gu_utils_test++.hpp000644 000164 177776 00000000313 15107057155 023720 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2022 Codership Oy #ifndef __gu_utils_test_hpp__ #define __gu_utils_test_hpp__ #include Suite* gu_utils_cpp_suite(); #endif /* __gu_utils_test_hpp__ */ galera-4-26.4.25/galerautils/tests/gu_string_test.hpp000644 000164 177776 00000000331 15107057155 023740 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy * * $Id$ */ #ifndef __gu_string_test__ #define __gu_string_test__ #include extern Suite *gu_string_suite(void); #endif /* __gu_string_test__ */ galera-4-26.4.25/galerautils/tests/avalanche.c000644 000164 177776 00000005101 15107057155 022255 0ustar00jenkinsnogroup000000 000000 /* * Copyright (c) 2012 Codership Oy * * This program is to measure avalanche effect of different hash * implementations, for that it uses 1M of random 8-byte keys. * Use #define macro below to define the implementation to test. 
* * Compilation: g++ -DHAVE_ENDIAN_H -DHAVE_BYTESWAP_H -O3 -Wall -Wno-unused avalanche.c \ gu_mmh3.c gu_spooky.c -o avalanche && time ./avalanche * Visualization in gnuplot: unset cbtics set xrange [-0.5:64.5] set yrange [-0.5:64.5] set cbrange [0.0:1.0] set xlabel 'Hash bit' set ylabel 'Flipped bit in message' set cblabel 'Hash bit flip probability [0.0 - 1.0]' set palette rgbformula 7,7,7 plot 'avalanche.out' matrix with image */ #include "gu_hash.h" #include #include #include uint64_t flip_count[64*64] = { 0, }; //#define HASH gu_mmh128_64 #define HASH gu_fast_hash64 int main (int argc, char* argv[]) { int n_keys = 1 << 20; int i, j, k; /* collect statistics */ for (k = 0; k < n_keys; k++) { uint64_t key_part = rand(); uint64_t const key = (key_part << 32) + rand(); uint64_t const hash = HASH (&key, sizeof(key)); for (j = 0; j < 64; j++) { uint64_t const flipped_key = key ^ (GU_LONG_LONG(0x01) << j); uint64_t const flipped_hash = HASH (&flipped_key, sizeof(flipped_key)); uint64_t flipped_bits = hash ^ flipped_hash; for (i = 0; i < 64; i++) { int const idx = j * 64 + i; flip_count[idx] += flipped_bits & GU_LONG_LONG(0x01); flipped_bits >>= 1; } } } /* print statistics */ char out_name [256] = { 0, }; snprintf(out_name, sizeof(out_name) - 1, "%s.out", argv[0]); FILE* const out = fopen(out_name, "w"); if (!out) { fprintf (stderr, "Could not open file for writing: '%s': %d (%s)", out_name, errno, strerror(errno)); return errno; } uint64_t base = n_keys; double min_stat = 1.0; double max_stat = 0.0; for (j = 0; j < 64; j++) { for (i = 0; i < 64; i++) { int const idx = j * 64 + i; double stat = (((double)(flip_count[idx]))/base); min_stat = min_stat > stat ? stat : min_stat; max_stat = max_stat < stat ? stat : max_stat; fprintf (out, "%6.4f%c", stat, 63 == i ? 
'\n' : '\t'); } } fclose(out); printf ("%6.4f : %6.4f (delta: %6.4f)\n", min_stat, max_stat, max_stat - min_stat); return 0; } galera-4-26.4.25/galerautils/tests/gu_fnv_test.h000644 000164 177776 00000000307 15107057155 022666 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy // $Id$ #ifndef __gu_fnv_test__ #define __gu_fnv_test__ #include extern Suite *gu_fnv_suite(void); #endif /* __gu_fnv_test__ */ galera-4-26.4.25/galerautils/tests/gu_lock_step_test.h000644 000164 177776 00000000324 15107057155 024057 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef __gu_lock_step_test__ #define __gu_lock_step_test__ extern Suite *gu_lock_step_suite(void); #endif /* __gu_lock_step_test__ */ galera-4-26.4.25/galerautils/tests/gu_rset_test.cpp000644 000164 177776 00000045620 15107057155 023414 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2020 Codership Oy * * $Id$ */ #undef NDEBUG #include "gu_serialize.hpp" #include "../src/gu_rset.hpp" #include "gu_rset_test.hpp" #include "gu_logger.hpp" #include "gu_hexdump.hpp" #include "gu_macros.h" class TestBaseName : public gu::Allocator::BaseName { std::string str_; public: TestBaseName(const char* name) : str_(name) {} void print(std::ostream& os) const { os << str_; } }; class TestRecord : public gu::Serializable { public: TestRecord (size_t size, const char* str) : Serializable(), size_(size), buf_(static_cast(::malloc(size_))), str_(reinterpret_cast(buf_ + sizeof(uint32_t))), own_(true) { ck_assert (size_ <= 0x7fffffff); if (0 == buf_) throw std::runtime_error("failed to allocate record"); gu::byte_t* tmp = const_cast(buf_); gu::serialize4(uint32_t(size_), tmp, 0); ::strncpy (const_cast(str_), str, size_ - 4); } TestRecord (const gu::byte_t* const buf, ssize_t const size) : Serializable(), size_(TestRecord::serial_size(buf, size)), buf_(buf), str_(reinterpret_cast(buf_ + sizeof(uint32_t))), own_(false) {} TestRecord (const TestRecord& t) : size_(t.size_), 
buf_(t.buf_), str_(t.str_), own_(false) {} virtual ~TestRecord () { if (own_) free (const_cast(buf_)); } const gu::byte_t* buf() const { return buf_; } const char* c_str() const { return str_; } ssize_t serial_size() const { return my_serial_size(); } static ssize_t serial_size(const gu::byte_t* const buf, ssize_t const size) { check_buf (buf, size, 1); uint32_t ret; gu::unserialize4(buf, 0, ret); return ret; } bool operator!= (const TestRecord& t) const { return (::strcmp(str_, t.str_)); } bool operator== (const TestRecord& t) const { return (!(*this != t)); } private: size_t const size_; const gu::byte_t* const buf_; const char* const str_; bool const own_; ssize_t my_serial_size () const { return size_; }; ssize_t my_serialize_to (void* buf, ssize_t size) const { check_buf (buf, size, size_); ::memcpy (buf, buf_, size_); return size_; } static void check_buf (const void* const buf, ssize_t const size, ssize_t min_size) { if (gu_unlikely (buf == 0 || size < min_size)) throw std::length_error("buffer too short"); } TestRecord& operator= (const TestRecord&); }; START_TEST (empty) { gu::RecordSetIn const rset_in(0, 0); ck_assert(0 == rset_in.size()); ck_assert(0 == rset_in.count()); try { rset_in.checksum(); } catch (std::exception& e) { ck_abort_msg("%s", e.what()); } } END_TEST static void test_version (gu::RecordSet::Version version) { int const alignment(gu::RecordSet::VER2 == version ? gu::RecordSet::VER2_ALIGNMENT : 1); size_t const MB = 1 << 20; // the choice of sizes below is based on default allocator memory store size // of 4MB. If it is changed, these need to be changed too. 
TestRecord rout0(120, "abc0"); ck_assert(rout0.serial_size() == 120); ck_assert(gtoh32(*reinterpret_cast(rout0.buf())) == 120); TestRecord rout1(121, "abc1"); TestRecord rout2(122, "012345"); TestRecord rout3(123, "defghij"); TestRecord rout4(3*MB, "klm"); TestRecord rout5(1*MB, "qpr"); std::vector records; records.push_back (&rout0); records.push_back (&rout1); records.push_back (&rout2); records.push_back (&rout3); records.push_back (&rout4); records.push_back (&rout5); // ensure alignment to 8 union { gu_word_t align; gu::byte_t buf[1024]; } reserved; assert((uintptr_t(reserved.buf) % GU_WORD_BYTES) == 0); std::ostringstream os; os << "gu_rset_test_ver" << version; TestBaseName str(os.str().c_str()); gu::RecordSetOut rset_out(reserved.buf, sizeof(reserved), str, gu::RecordSet::CHECK_MMH64, version); size_t offset(rset_out.size()); ck_assert(1 == rset_out.page_count()); std::pair rp; int rsize; const void* rout_ptrs[7]; // this should be allocated inside current page rp = rset_out.append (rout0); rout_ptrs[0] = rp.first; rsize = rp.second; ck_assert(rsize == rout0.serial_size()); ck_assert(rsize >= 0); ck_assert(rsize == TestRecord::serial_size(rp.first, rsize)); offset += rsize; ck_assert(rset_out.size() == offset); ck_assert(1 == rset_out.page_count()); // this should trigger new page since not stored rp = rset_out.append (rout1.buf(), rout1.serial_size(), false); rout_ptrs[1] = rp.first; rsize = rp.second; ck_assert(rsize == rout1.serial_size()); offset += rsize; ck_assert(rset_out.size() == offset); if (0 == (offset % alignment)) // aligment page may be required ck_assert(2 == rset_out.page_count()); else ck_assert(3 == rset_out.page_count()); // this should trigger new page since previous one was not stored rp = rset_out.append (rout2); rout_ptrs[2] = rp.first; rsize = rp.second; ck_assert(rsize == rout2.serial_size()); ck_assert(rsize >= 0); ck_assert(rsize == TestRecord::serial_size(rp.first, rsize)); offset += rsize; ck_assert(rset_out.size() == offset); 
if (0 == (offset % alignment)) // aligment page may be required ck_assert_msg(3 == rset_out.page_count(), "Expected %d pages, found %zu", 3, rset_out.page_count()); else ck_assert_msg(4 == rset_out.page_count(), "Expected %d pages, found %zu", 4, rset_out.page_count()); //***** test partial record appending *****// // this should be allocated inside the current page. rp = rset_out.append (rout3.buf(), 3); // rout_ptrs[2] = rp.first; rsize = rp.second; offset += rp.second; ck_assert((3 + (0 != (offset % alignment))) == rset_out.page_count()); // this should trigger a new page, since not stored rp = rset_out.append (rout3.buf() + 3, rout3.serial_size() - 3, false, false); rout_ptrs[3] = rp.first; rsize += rp.second; ck_assert(rsize == rout3.serial_size()); offset += rp.second; ck_assert(rset_out.size() == offset); ck_assert((4 + (0 != (offset % alignment))) == rset_out.page_count()); // this should trigger new page, because won't fit in the current page rp = rset_out.append (rout4); rout_ptrs[4] = rp.first; rsize = rp.second; ck_assert(rsize == rout4.serial_size()); offset += rsize; ck_assert(rset_out.size() == offset); ck_assert((5 + (0 != (offset % alignment))) == rset_out.page_count()); // this should trigger new page, because 4MB RAM limit exceeded rp = rset_out.append (rout5); rout_ptrs[5] = rp.first; rsize = rp.second; ck_assert(rsize == rout5.serial_size()); offset += rsize; ck_assert(rset_out.size() == offset); if (0 == (offset % alignment)) // aligment page may be required ck_assert_msg(6 == rset_out.page_count(), "Expected %d pages, found %zu", 6, rset_out.page_count()); else ck_assert_msg(7 == rset_out.page_count(), "Expected %d pages, found %zu", 7, rset_out.page_count()); ck_assert(records.size() == size_t(rset_out.count())); gu::RecordSet::GatherVector out_bufs; out_bufs->reserve (rset_out.page_count()); bool const padding_page(offset % alignment); size_t min_out_size(0); for (size_t i = 0; i < records.size(); ++i) { min_out_size += 
records[i]->serial_size(); } size_t const out_size (rset_out.gather (out_bufs)); ck_assert(out_size == rset_out.serial_size()); ck_assert(out_size > min_out_size && out_size <= offset); ck_assert_msg(out_bufs->size() <= size_t(rset_out.page_count()) && out_bufs->size() >= size_t(rset_out.page_count()-padding_page), "Expected %zu buffers, got: %zd", rset_out.page_count(), out_bufs->size()); ck_assert((out_size % alignment) == 0); // make sure it is aligned /* concatenate all buffers into one */ std::vector in_buf; in_buf.reserve(out_size); mark_point(); for (size_t i = 0; i < out_bufs->size(); ++i) { // 0th fragment starts with header, so it it can't be used in this check // last fragment may be a padding page bool const check_fragment(i > 0 && i < (out_bufs->size()- padding_page)); ck_assert_msg(!check_fragment || rout_ptrs[i] == out_bufs[i].ptr, "Record pointers don't mathch after gather(). " "old: %p, new: %p", rout_ptrs[i],out_bufs[i].ptr); ssize_t size; int const off(gu::unserialize4(out_bufs[i].ptr, 0, size)); const char* str = reinterpret_cast(out_bufs[i].ptr) + off; ck_assert_msg(!check_fragment || size > ssize_t(sizeof(uint32_t)), "Expected size > 4, got %zd(%#010zx). 
i = %zu, buf = %s", size, size, i, str); // the above variables make have sense only on certain pages // hence ifs below size_t k = i; switch (i) { case 3: break; // 4th page is partial 4th record case 1: case 2: ck_assert_msg(::strcmp(str, records[k]->c_str()) == 0, "Buffer %zu: appending '%s', expected '%s'", i, str, records[k]->c_str()); } if (i == 1 || i == 4) { ck_assert_msg(size == records[k]->serial_size(), "Buffer %zu: appending size %zd, expected %zd", i, size, records[k]->serial_size()); } log_info << "\nadding buf " << i << ": " << gu::Hexdump(out_bufs[i].ptr, std::min(out_bufs[i].size, 24), true); size_t old_size = in_buf.size(); const gu::byte_t* const begin (reinterpret_cast(out_bufs[i].ptr)); in_buf.insert (in_buf.end(), begin, begin + out_bufs[i].size); ck_assert(old_size + out_bufs[i].size == in_buf.size()); } ck_assert_msg(in_buf.size() == out_size, "Sent buf size: %zu, recvd buf size: %zu", out_size, in_buf.size()); log_info << "Resulting RecordSet buffer:\n" << gu::Hexdump(in_buf.data(), 32, false) << '\n' << gu::Hexdump(in_buf.data(), 32, true); gu::RecordSetIn const rset_in(in_buf.data(), in_buf.size()); ck_assert(rset_in.size() == rset_out.size()); ck_assert(rset_in.count() == rset_out.count()); ck_assert(rset_in.serial_size() == rset_out.serial_size()); for (ssize_t i = 0; i < rset_in.count(); ++i) { TestRecord const rin(rset_in.next()); ck_assert_msg(rin == *records[i], "Record %zd failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } /* Test checksum method: */ try { rset_in.checksum(); } catch (std::exception& e) { ck_abort_msg("%s", e.what()); } /* test buf() method */ gu::RecordSetIn const rset_in_buf(rset_in.buf().ptr, rset_in.buf().size); ck_assert(rset_in.count() == rset_in_buf.count()); ck_assert(rset_in.size() == rset_in_buf.size()); ck_assert(rset_in.buf().ptr == rset_in_buf.buf().ptr); for (ssize_t i = 0; i < rset_in_buf.count(); ++i) { TestRecord const rin(rset_in_buf.next()); ck_assert_msg(rin == *records[i], 
"Record %zd failed: expected %s, found %s", i, records[i]->c_str(), rin.c_str()); } /* test empty RecordSetIn creation with subsequent initialization */ gu::RecordSetIn rset_in_empty; ck_assert(rset_in_empty.size() == 0); ck_assert(rset_in_empty.count() == 0); try { TestRecord const rin(rset_in_empty.next()); ck_abort_msg("next() succeeded on an empty writeset"); } catch (gu::Exception& e) { ck_assert(e.get_errno() == EPERM); } rset_in_empty.init(in_buf.data(), in_buf.size(), true); ck_assert(rset_in_empty.size() == rset_out.size()); ck_assert(rset_in_empty.count() == rset_out.count()); /* Try some data corruption: swap a bit */ in_buf[10] ^= 1; try { rset_in.checksum(); ck_abort_msg("checksum() didn't throw on corrupted set"); } catch (std::exception& e) {} try { rset_in_empty.checksum(); ck_abort_msg("checksum() didn't throw on corrupted set"); } catch (std::exception& e) {} } START_TEST (ver1) { test_version(gu::RecordSet::VER1); } END_TEST START_TEST (ver2) { test_version(gu::RecordSet::VER2); } END_TEST /* This test is to test how padding mixes with persistent (stored outside) * pages. In this case new padding buf needs to be allocated */ static void test_padding(gu::RecordSet::Version rsv) { int const alignment(gu::RecordSet::VER2 == rsv ? 
gu::RecordSet::VER2_ALIGNMENT : 1); union { gu_word_t align; gu::byte_t buf[1024]; } reserved; assert((uintptr_t(reserved.buf) % GU_WORD_BYTES) == 0); std::ostringstream os; os << "gu_rset_padding_test_ver" << rsv; TestBaseName str(os.str().c_str()); gu::RecordSetOut rso(reserved.buf, sizeof(reserved), str, gu::RecordSet::CHECK_MMH64, rsv); uint64_t const data_out_volatile(0xaabbccdd); uint32_t const data_out_persistent(0xffeeddcc); size_t const payload_size(sizeof(data_out_volatile) + sizeof(data_out_persistent)); bool const padding_page(payload_size % alignment); { uint64_t const d(data_out_volatile); rso.append(&d, sizeof(d), true, false); } rso.append(&data_out_persistent, sizeof(data_out_persistent), false, false); gu::RecordSet::GatherVector out; size_t const out_size(rso.gather(out)); /* here we must get a vector of */ size_t const expected_pages(2 + padding_page); ck_assert_msg(out->size() == expected_pages, "Expected %zu pages, got %zu", expected_pages, out->size()); /* concatenate all out buffers */ std::vector in_buf; in_buf.reserve(out_size); for (size_t i(0); i < out->size(); ++i) { const gu::byte_t* ptr(static_cast(out[i].ptr)); in_buf.insert (in_buf.end(), ptr, ptr + out[i].size); } ck_assert(in_buf.size() == out_size); try { gu::RecordSetIn rsi(in_buf.data(), in_buf.size()); rsi.checksum(); } catch (gu::Exception& e) { ck_abort_msg("%s", e.what()); } } START_TEST (ver1_padding) { test_padding(gu::RecordSet::VER1); } END_TEST START_TEST (ver2_padding) { test_padding(gu::RecordSet::VER2); } END_TEST /* return the total size of serialized record set * @param count number of records * @param size record size */ static size_t ver2_size(int const count, size_t const size, gu::RecordSet::CheckType ct) { typedef std::vector record_t; record_t record(size); assert(record.size() == size); std::ostringstream os; os << "gu_rset_test_ver2_count" << count << "_size" << size; TestBaseName name(os.str().c_str()); gu::RecordSetOut rset(NULL, 0, name, ct, 
gu::RecordSet::VER2); for (int i(0); i < count; ++i) { rset.append(record.data(), record.size()); } gu::RecordSet::GatherVector out_bufs; out_bufs->reserve(rset.page_count()); size_t const out_size(rset.gather(out_bufs)); ck_assert_msg(0 == (out_size % gu::RecordSet::VER2_ALIGNMENT), "Final size %zu is not multiple of %d. " "Params: count: %d, size: %zu, ct: %d", out_size, gu::RecordSet::VER2_ALIGNMENT, count, size, ct); return out_size; } START_TEST (ver2_sizes) { gu::RecordSet::CheckType ct; size_t s, ct_s, rs, es; int c; #ifdef NDEBUG ct = gu::RecordSet::CHECK_MMH32; ct_s = gu::RecordSet::check_size(ct); try { s = ver2_size(128, 1, ct); ck_abort_msg("Must throw exception!"); } catch(gu::Exception& e) {} #endif c = 1024; // max count allowed in "short" header s = 1; // record size ct = gu::RecordSet::CHECK_NONE; ct_s = gu::RecordSet::check_size(ct); rs = ver2_size(c, s, ct); // expected size: short header(8) + checksum size + aligned payload size es = 8 + ct_s + GU_ALIGN(c*s, gu::RecordSet::VER2_ALIGNMENT); ck_assert(rs == es); c += 1; // this count must force "long" header s = 1; // record size ct = gu::RecordSet::CHECK_MMH64; ct_s = gu::RecordSet::check_size(ct); rs = ver2_size(c, s, ct); // expected size: long header(16) + checksum size + aligned payload size es = 16 + ct_s + GU_ALIGN(c*s, gu::RecordSet::VER2_ALIGNMENT); ck_assert(rs == es); ct = gu::RecordSet::CHECK_MMH128; ct_s = gu::RecordSet::check_size(ct); c = 1; // record count s = (1 << 14) - ct_s - 8; // max record size representable in "short" header rs = ver2_size(c, s, ct); // expected size: long header(16) + checksum size + aligned payload size es = 8 + ct_s + GU_ALIGN(c*s, gu::RecordSet::VER2_ALIGNMENT); ck_assert(rs == es); ct = gu::RecordSet::CHECK_MMH128; ct_s = gu::RecordSet::check_size(ct); c = 1; // record count s += 1; // must force "long" header rs = ver2_size(c, s, ct); // expected size: long header(16) + checksum size + aligned payload size es = 16 + ct_s + GU_ALIGN(c*s, 
gu::RecordSet::VER2_ALIGNMENT); ck_assert(rs == es); ct = gu::RecordSet::CHECK_NONE; ct_s = gu::RecordSet::check_size(ct); c = 1023; s = 16; rs = ver2_size(c, s, ct); // expected size: long header(16) + checksum size + aligned payload size es = 8 + ct_s + GU_ALIGN(c*s, gu::RecordSet::VER2_ALIGNMENT); ck_assert(rs == es); } END_TEST Suite* gu_rset_suite () { Suite* s(suite_create("gu::RecordSet")); TCase* t(tcase_create("RecordSet v1")); tcase_add_test (t, empty); tcase_add_test (t, ver1); tcase_add_test (t, ver1_padding); #ifndef GALERA_ONLY_ALIGNED suite_add_tcase (s, t); #endif t = tcase_create("RecordSet v2"); tcase_add_test (t, ver2); tcase_add_test (t, ver2_padding); tcase_add_test (t, ver2_sizes); suite_add_tcase (s, t); // tcase_set_timeout(t, 60); return s; } galera-4-26.4.25/galerautils/tests/gu_shared_ptr_test.hpp000644 000164 177776 00000000337 15107057155 024573 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2015 Codership Oy // #ifndef GU_SHARED_PTR_TEST_HPP #define GU_SHARED_PTR_TEST_HPP #include extern Suite* gu_shared_ptr_suite(void); #endif // ! 
GU_SHARED_PTR_TEST_HPP galera-4-26.4.25/galerautils/tests/gu_uuid_test.c000644 000164 177776 00000002156 15107057155 023042 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #include #include #include #include #include "../src/gu_log.h" #include "../src/gu_uuid.h" #include "gu_uuid_test.h" START_TEST (gu_uuid_test) { size_t uuid_num = 10; gu_uuid_t uuid[uuid_num]; size_t i; uuid[0] = GU_UUID_NIL; gu_uuid_generate (&uuid[0], NULL, 0); ck_assert(memcmp (&uuid[0], &GU_UUID_NIL, sizeof(gu_uuid_t))); ck_assert(gu_uuid_compare(&uuid[0], &GU_UUID_NIL)); for (i = 1; i < uuid_num; i++) { uuid[i] = GU_UUID_NIL; gu_uuid_generate (&uuid[i], NULL, 0); ck_assert(gu_uuid_compare(&uuid[i], &GU_UUID_NIL)); ck_assert(gu_uuid_compare(&uuid[i], &uuid[i - 1])); ck_assert(1 == gu_uuid_older (&uuid[i - 1], &uuid[i])); ck_assert(-1 == gu_uuid_older (&uuid[i], &uuid[i - 1])); } } END_TEST Suite *gu_uuid_suite(void) { Suite *suite = suite_create("Galera UUID utils"); TCase *tcase = tcase_create("gu_uuid"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gu_uuid_test); return suite; } galera-4-26.4.25/galerautils/tests/gu_mem_pool_test.hpp000644 000164 177776 00000000333 15107057155 024243 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy // $Id$ #ifndef __gu_mem_pool_test__ #define __gu_mem_pool_test__ #include extern Suite *gu_mem_pool_suite(void); #endif /* __gu_mem_pool_test__ */ galera-4-26.4.25/galerautils/src/000755 000164 177776 00000000000 15107057160 017613 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/galerautils/src/gu_crc32c_x86.c000644 000164 177776 00000007645 15107057155 022256 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2020-2023 Codership Oy */ /** * @file Hardware-accelerated implementation of CRC32C algorithm using Intel's * x86 instructions. * * Defines gu_crc32c_hardware() that returns pointer to gu_crc32c_func_t if * available on a given CPU. 
*/ #include "gu_crc32c.h" #if defined(GU_CRC32C_X86) #include "gu_log.h" #include #include /* process data preceding the first 4-aligned byte */ static inline gu_crc32c_t crc32c_x86_head3(gu_crc32c_t state, const uint8_t* ptr, size_t len) { assert(len > 0); assert(len < 4); if (((uintptr_t)ptr) & 1) { /* odd address */ state = __builtin_ia32_crc32qi(state, *ptr); ptr++; len--; } /* here ptr is at least 2-aligned */ if (len >= 2) { assert(0 == ((uintptr_t)ptr)%2); state = __builtin_ia32_crc32hi(state, *(uint16_t*)ptr); ptr += 2; len -= 2; } if (len) { assert(1 == len); state = __builtin_ia32_crc32qi(state, *ptr); } return state; } static inline gu_crc32c_t crc32c_x86_tail3(gu_crc32c_t state, const uint8_t* ptr, size_t len) { switch (len) { case 3: case 2: /* this byte is 4-aligned */ state = __builtin_ia32_crc32hi(state, *(uint16_t*)ptr); if (len == 2) break; ptr += 2; /* fall through */ case 1: state = __builtin_ia32_crc32qi(state, *ptr); break; default: assert(0); } return state; } static inline gu_crc32c_t crc32c_x86(gu_crc32c_t state, const uint8_t* ptr, size_t len) { if (0 == len) return state; static size_t const arg_size = sizeof(uint32_t); size_t align_offset = ((uintptr_t)ptr) % arg_size; if (align_offset) { align_offset = arg_size - align_offset; if (align_offset > len) align_offset = len; state = crc32c_x86_head3(state, ptr, align_offset); len -= align_offset; ptr += align_offset; } while (len >= arg_size) { state = __builtin_ia32_crc32si(state, *(uint32_t*)ptr); len -= arg_size; ptr += arg_size; } assert(len < 4); return (len ? 
crc32c_x86_tail3(state, ptr, len) : state); } gu_crc32c_t gu_crc32c_x86(gu_crc32c_t state, const void* data, size_t len) { return crc32c_x86(state, (const uint8_t*)data, len); } #if defined(GU_CRC32C_X86_64) gu_crc32c_t gu_crc32c_x86_64(gu_crc32c_t state, const void* data, size_t len) { const uint8_t* ptr = (const uint8_t*)data; #ifdef __LP64__ if (0 == len) return state; static size_t const arg_size = sizeof(uint64_t); size_t align_offset = ((uintptr_t)ptr) % arg_size; if (align_offset) { align_offset = arg_size - align_offset; if (align_offset > len) align_offset = len; state = crc32c_x86(state, ptr, align_offset); len -= align_offset; ptr += align_offset; } uint64_t state64 = state; while (len >= arg_size) { state64 = __builtin_ia32_crc32di(state64, *(uint64_t*)ptr); len -= arg_size; ptr += arg_size; } state = (uint32_t)state64; #endif /* __LP64__ */ return crc32c_x86(state, ptr, len); } #endif /* GU_CRC32C_X86_64 */ #include static uint32_t x86_cpuid(uint32_t input) { uint32_t eax, ebx, ecx, edx; if (__get_cpuid(input, &eax, &ebx, &ecx, &edx)) return ecx; else return 0; } gu_crc32c_func_t gu_crc32c_hardware() { static uint32_t const SSE42_BIT = 1 << 20; uint32_t const cpuid = x86_cpuid(1); bool const SSE42_present = cpuid & SSE42_BIT; if (SSE42_present) { #if defined(GU_CRC32C_X86_64) gu_info ("CRC-32C: using 64-bit x86 acceleration."); return gu_crc32c_x86_64; #else gu_info ("CRC-32C: using 32-bit x86 acceleration."); return gu_crc32c_x86; #endif } else { return NULL; } } #endif /* GU_CRC32C_X86 */ galera-4-26.4.25/galerautils/src/galerautils.h000644 000164 177776 00000001324 15107057155 022304 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2017 Codership Oy /** * @file GaleraUtils main header file * * $Id$ */ #ifndef _galerautils_h_ #define _galerautils_h_ #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #include "gu_macros.h" #include "gu_limits.h" #include "gu_byteswap.h" #include "gu_time.h" #include "gu_log.h" #include "gu_conf.h" 
#include "gu_assert.h" #include "gu_mem.h" #include "gu_threads.h" #include "gu_dbug.h" #include "gu_fifo.h" #include "gu_uuid.h" #include "gu_to.h" #include "gu_lock_step.h" #include "gu_utils.h" #include "gu_config.h" #include "gu_abort.h" #include "gu_errno.h" #include "gu_atomic.h" #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _galerautils_h_ */ galera-4-26.4.25/galerautils/src/gu_uuid.cpp000644 000164 177776 00000001075 15107057155 021767 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014-2017 Codership Oy */ #include "gu_uuid.hpp" #include namespace { class scan_error_message { std::ostringstream os_; public: scan_error_message(const std::string& s) : os_() { os_ << "could not parse UUID from '" << s << '\''; } const std::ostringstream& os() const { return os_; } }; } namespace gu { UUIDScanException::UUIDScanException(const std::string& s) : Exception(scan_error_message(s).os().str(), EINVAL) {} } galera-4-26.4.25/galerautils/src/gu_buffer.cpp000644 000164 177776 00000000131 15107057155 022262 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2010 Codership Oy // #include "gu_buffer.hpp" galera-4-26.4.25/galerautils/src/gu_mmh3.c000644 000164 177776 00000033030 15107057155 021321 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy /** * @file MurmurHash3 implementation * (slightly rewritten from the reference C++ impl.) 
* * $Id$ */ #include "gu_mmh3.h" #include "gu_byteswap.h" #include // for memset() and memcpy() //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche static GU_FORCE_INLINE uint32_t _mmh3_fmix32 (uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } static GU_FORCE_INLINE uint64_t _mmh3_fmix64 (uint64_t k) { k ^= k >> 33; k *= GU_ULONG_LONG(0xff51afd7ed558ccd); k ^= k >> 33; k *= GU_ULONG_LONG(0xc4ceb9fe1a85ec53); k ^= k >> 33; return k; } //----------------------------------------------------------------------------- static uint32_t const _mmh3_32_c1 = 0xcc9e2d51; static uint32_t const _mmh3_32_c2 = 0x1b873593; static GU_FORCE_INLINE void _mmh3_block_32 (uint32_t k1, uint32_t* h1) { k1 *= _mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; *h1 ^= k1; *h1 = GU_ROTL32(*h1,13); *h1 *= 5; *h1 += 0xe6546b64; } static GU_FORCE_INLINE void _mmh3_blocks_32 (const uint32_t* const blocks,size_t const nblocks,uint32_t* h1) { //---------- // body for (size_t i = 0; i < nblocks; i++) { //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here uint32_t k; memcpy(&k, &blocks[i], sizeof(k)); _mmh3_block_32 (gu_le32(k), h1); /* convert from little-endian */ } } static GU_FORCE_INLINE uint32_t _mmh3_tail_32 (const uint8_t* const tail, size_t const len, uint32_t h1) { //---------- // tail #if 0 /* Reference implementation */ uint32_t k1 = 0; switch(len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= _mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; h1 ^= k1; }; #else /* Optimized implementation */ size_t const shift = (len & 3) << 3; if (shift) { uint32_t k1; memcpy(&k1, tail, sizeof(k1)); k1 = gu_le32(k1) & (0x00ffffff >> (24-shift)); k1 *= 
_mmh3_32_c1; k1 = GU_ROTL32(k1,15); k1 *= _mmh3_32_c2; h1 ^= k1; } #endif /* Optimized implementation */ //---------- // finalization h1 ^= len; h1 = _mmh3_fmix32(h1); return h1; } static GU_FORCE_INLINE uint32_t _mmh32_seed (const void* key, size_t const len, uint32_t seed) { size_t const nblocks = len >> 2; const uint32_t* const blocks = (const uint32_t*)key; const uint8_t* const tail = (const uint8_t*)(blocks + nblocks); _mmh3_blocks_32 (blocks, nblocks, &seed); return _mmh3_tail_32 (tail, len, seed); } // same as FNV32 seed static uint32_t const GU_MMH32_SEED = GU_ULONG(2166136261); /*! A function to hash buffer in one go */ uint32_t gu_mmh32(const void* const buf, size_t const len) { return _mmh32_seed (buf, len, GU_MMH32_SEED); } /* * 128-bit MurmurHash3 */ static uint64_t const _mmh3_128_c1 = GU_ULONG_LONG(0x87c37b91114253d5); static uint64_t const _mmh3_128_c2 = GU_ULONG_LONG(0x4cf5ad432745937f); static GU_FORCE_INLINE void _mmh3_128_block (uint64_t k1, uint64_t k2, uint64_t* h1, uint64_t* h2) { k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; *h1 ^= k1; *h1 = GU_ROTL64(*h1,27); *h1 += *h2; *h1 *= 5; *h1 += 0x52dce729; k2 *= _mmh3_128_c2; k2 = GU_ROTL64(k2,33); k2 *= _mmh3_128_c1; *h2 ^= k2; *h2 = GU_ROTL64(*h2,31); *h2 += *h1; *h2 *= 5; *h2 += 0x38495ab5; } static GU_FORCE_INLINE void _mmh3_128_blocks (const uint64_t* const blocks, size_t const nblocks, uint64_t* h1, uint64_t* h2) { //---------- // body for(size_t i = 0; i < nblocks; i += 2) { //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here uint64_t k[2]; memcpy(k, &blocks[i], sizeof(k)); _mmh3_128_block (gu_le64(k[0]), gu_le64(k[1]), h1, h2); } } static GU_FORCE_INLINE void _mmh3_128_tail (const uint8_t* const tail, size_t const len, uint64_t h1, uint64_t h2, uint64_t* const out) { //---------- // tail uint64_t k1 = 0; uint64_t k2 = 0; 
switch(len & 15) { case 15: k2 ^= ((uint64_t)tail[14]) << 48; // fall through case 14: k2 ^= ((uint64_t)tail[13]) << 40; // fall through case 13: k2 ^= ((uint64_t)tail[12]) << 32; // fall through case 12: k2 ^= ((uint64_t)tail[11]) << 24; // fall through case 11: k2 ^= ((uint64_t)tail[10]) << 16; // fall through case 10: k2 ^= ((uint64_t)tail[ 9]) << 8; // fall through case 9: k2 ^= ((uint64_t)tail[ 8]) << 0; k2 *= _mmh3_128_c2; k2 = GU_ROTL64(k2,33); k2 *= _mmh3_128_c1; h2 ^= k2; memcpy(&k1, tail, sizeof(k1)); k1 = gu_le64(k1); k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; h1 ^= k1; break; case 8: k1 ^= ((uint64_t)tail[ 7]) << 56; // fall through case 7: k1 ^= ((uint64_t)tail[ 6]) << 48; // fall through case 6: k1 ^= ((uint64_t)tail[ 5]) << 40; // fall through case 5: k1 ^= ((uint64_t)tail[ 4]) << 32; // fall through case 4: k1 ^= ((uint64_t)tail[ 3]) << 24; // fall through case 3: k1 ^= ((uint64_t)tail[ 2]) << 16; // fall through case 2: k1 ^= ((uint64_t)tail[ 1]) << 8; // fall through case 1: k1 ^= ((uint64_t)tail[ 0]) << 0; k1 *= _mmh3_128_c1; k1 = GU_ROTL64(k1,31); k1 *= _mmh3_128_c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = _mmh3_fmix64(h1); h2 = _mmh3_fmix64(h2); h1 += h2; h2 += h1; out[0] = h1; out[1] = h2; } static GU_FORCE_INLINE void _mmh3_128_seed (const void* const key, size_t const len, uint64_t s1, uint64_t s2, uint64_t* const out) { size_t const nblocks = (len >> 4) << 1; /* using 64-bit half-blocks */ const uint64_t* const blocks = (const uint64_t*)(key); const uint8_t* const tail = (const uint8_t*)(blocks + nblocks); _mmh3_128_blocks (blocks, nblocks, &s1, &s2); _mmh3_128_tail (tail, len, s1, s2, out); } // same as FNV128 seed static uint64_t const GU_MMH128_SEED1 = GU_ULONG_LONG(0x6C62272E07BB0142); static uint64_t const GU_MMH128_SEED2 = GU_ULONG_LONG(0x62B821756295C58D); /* returns hash in the canonical byte order, as a byte array */ extern void gu_mmh128 (const void* const msg, 
size_t const len, void* const out) { uint64_t res[2]; _mmh3_128_seed (msg, len, GU_MMH128_SEED1, GU_MMH128_SEED2, res); res[0] = gu_le64(res[0]); res[1] = gu_le64(res[1]); memcpy(out, res, sizeof(res)); } /* returns hash as an integer, in host byte-order */ extern uint64_t gu_mmh128_64 (const void* const msg, size_t len) { uint64_t res[2]; _mmh3_128_seed (msg, len, GU_MMH128_SEED1, GU_MMH128_SEED2, res); return res[0]; } /* returns hash as an integer, in host byte-order */ extern uint32_t gu_mmh128_32 (const void* const msg, size_t len) { uint64_t res[2]; _mmh3_128_seed (msg, len, GU_MMH128_SEED1, GU_MMH128_SEED2, res); return (uint32_t)res[0]; } /* * Functions to hash message by parts * (only 128-bit version, 32-bit is not relevant any more) */ /*! Initialize/reset MMH context with a particular seed. * The seed is two 8-byte _integers_, obviously in HOST BYTE ORDER. * Should not be used directly. */ static GU_INLINE void _mmh128_init_seed (gu_mmh128_ctx_t* const mmh, uint64_t const s1, uint64_t const s2) { memset (mmh, 0, sizeof(*mmh)); mmh->hash[0] = s1; mmh->hash[1] = s2; } /*! Initialize MMH context with a default Galera seed. */ void gu_mmh128_init(gu_mmh128_ctx_t* const mmh) { _mmh128_init_seed (mmh, GU_MMH128_SEED1, GU_MMH128_SEED2); } /*! Apeend message part to hash context */ void gu_mmh128_append (gu_mmh128_ctx_t* const mmh, const void* part, size_t len) { size_t tail_len = mmh->length & 15; mmh->length += len; if (tail_len) /* there's something in the tail */// do we need this if()? 
{ size_t const to_fill = 16 - tail_len; void* const tail_end = (uint8_t*)mmh->tail + tail_len; if (len >= to_fill) /* we can fill a full block */ { memcpy (tail_end, part, to_fill); _mmh3_128_block (gu_le64(mmh->tail[0]), gu_le64(mmh->tail[1]), &mmh->hash[0], &mmh->hash[1]); part = ((char*)part) + to_fill; len -= to_fill; } else { memcpy (tail_end, part, len); return; } } size_t const nblocks = (len >> 4) << 1; /* using 64-bit half-blocks */ const uint64_t* const blocks = (const uint64_t*)(part); _mmh3_128_blocks (blocks, nblocks, &mmh->hash[0], &mmh->hash[1]); /* save possible trailing bytes to tail */ memcpy (mmh->tail, blocks + nblocks, len & 15); } /*! Get the accumulated message hash (does not change the context) */ void gu_mmh128_get (const gu_mmh128_ctx_t* const mmh, void* const res) { uint64_t r[2]; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], r); r[0] = gu_le64(r[0]); r[1] = gu_le64(r[1]); memcpy(res, r, sizeof(r)); } uint64_t gu_mmh128_get64 (const gu_mmh128_ctx_t* const mmh) { uint64_t res[2]; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], res); return res[0]; } uint32_t gu_mmh128_get32 (const gu_mmh128_ctx_t* const mmh) { uint64_t res[2]; _mmh3_128_tail ((const uint8_t*)mmh->tail, mmh->length, mmh->hash[0], mmh->hash[1], res); return (uint32_t)res[0]; } void gu_mmh3_32 (const void* const key, int const len, uint32_t const seed, void* const out) { uint32_t res = _mmh32_seed (key, len, seed); res = gu_le32(res); memcpy(out, &res, sizeof(res)); } //----------------------------------------------------------------------------- #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ void gu_mmh3_x86_128 (const void* key, const int len, const uint32_t seed, void* out) { const uint8_t* data = (const uint8_t*)key; const int nblocks = len >> 4; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 
= 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; //---------- // body const uint32_t* blocks = (const uint32_t*)(data + (nblocks << 4)); int i; for(i = -nblocks; i; i++) { uint32_t k1 = gu_le32(blocks[(i << 2) + 0]); uint32_t k2 = gu_le32(blocks[(i << 2) + 1]); uint32_t k3 = gu_le32(blocks[(i << 2) + 2]); uint32_t k4 = gu_le32(blocks[(i << 2) + 3]); k1 *= c1; k1 = GU_ROTL32(k1,15); k1 *= c2; h1 ^= k1; h1 = GU_ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b; k2 *= c2; k2 = GU_ROTL32(k2,16); k2 *= c3; h2 ^= k2; h2 = GU_ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747; k3 *= c3; k3 = GU_ROTL32(k3,17); k3 *= c4; h3 ^= k3; h3 = GU_ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35; k4 *= c4; k4 = GU_ROTL32(k4,18); k4 *= c1; h4 ^= k4; h4 = GU_ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17; } //---------- // tail const uint8_t * tail = (const uint8_t*)(blocks); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch(len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = GU_ROTL32(k4,18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = GU_ROTL32(k3,17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = GU_ROTL32(k2,16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = GU_ROTL32(k1,15); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = _mmh3_fmix32(h1); h2 = _mmh3_fmix32(h2); h3 = _mmh3_fmix32(h3); h4 = _mmh3_fmix32(h4); h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; ((uint32_t*)out)[0] = gu_le32(h1); ((uint32_t*)out)[1] = gu_le32(h2); 
((uint32_t*)out)[2] = gu_le32(h3); ((uint32_t*)out)[3] = gu_le32(h4); } #endif /* 0 */ //----------------------------------------------------------------------------- void gu_mmh3_x64_128 (const void* key, int len, uint32_t const seed, void* const out) { uint64_t* const res = (uint64_t*)out; _mmh3_128_seed (key, len, seed, seed, res); res[0] = gu_le64(res[0]); res[1] = gu_le64(res[1]); } galera-4-26.4.25/galerautils/src/gu_fnv_bench.c000644 000164 177776 00000014523 15107057155 022413 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /*! * @file Benchmark for different hash implementations: * fnv32, fnv64, fnv128, mmh3, md5 from libssl and md5 from crypto++ * * To compile on Ubuntu: g++ -DHAVE_ENDIAN_H -DHAVE_BYTESWAP_H -DGALERA_LOG_H_ENABLE_CXX \ -O3 -march=native -msse4 -Wall -Werror -I../.. gu_fnv_bench.c \ gu_mmh3.c gu_spooky.c gu_log.c gu_crc32c.c gu_crc32c_x86.c \ -lssl -lcrypto -lcrypto++ -o gu_fnv_bench * * on CentOS some play with -lcrypto++ may be needed (also see includes below) * * To run: * gu_fnv_bench */ #include "gu_crc32c.h" #include "gu_fnv.h" #include "gu_mmh3.h" #include "gu_spooky.h" #include "gu_hash.h" #include #include #include #include #include #define CRYPTOPP_ENABLE_NAMESPACE_WEAK 1 #include enum algs { CRC32sw, CRC32hw, FNV32, FNV64, FNV128, MMH32, MMH128, SPOOKYS, SPOOKY, MD5SSL, MD5CPP, FAST128, TABLE }; static int timer (const void* const buf, ssize_t const length, size_t const loops, enum algs const type) { double begin, end; struct timeval tv; const char* alg = "undefined"; size_t volatile h; // this variable serves to prevent compiler from // optimizing out the calls size_t const modulo = sizeof(void*) - 1; size_t const len = length - modulo; // correct length for offset const uint8_t* const ptr = (const uint8_t*)buf; gettimeofday (&tv, NULL); begin = (double)tv.tv_sec + 1.e-6 * tv.tv_usec; #ifdef EXTERNAL_LOOP #define EXTERNAL_LOOP_BEGIN for (size_t i = 0; i < loops; i++) { \ size_t const off = i & modulo; 
#define EXTERNAL_LOOP_END } #define INTERNAL_LOOP_BEGIN #define INTERNAL_LOOP_END #else #define EXTERNAL_LOOP_BEGIN #define EXTERNAL_LOOP_END #define INTERNAL_LOOP_BEGIN for (size_t i = 0; i < loops; i++) { \ size_t const off = i & modulo; #define INTERNAL_LOOP_END } #endif EXTERNAL_LOOP_BEGIN switch (type) { case CRC32sw: case CRC32hw: { if (CRC32sw == type) alg = "crc32sw"; else alg = "crc32hw"; INTERNAL_LOOP_BEGIN h = gu_crc32c (ptr + off, len); INTERNAL_LOOP_END break; } case FNV32: { alg = "fnv32a"; INTERNAL_LOOP_BEGIN uint32_t hash = GU_FNV32_SEED; gu_fnv32a_internal (ptr + off, len, &hash); h = hash; INTERNAL_LOOP_END break; } case FNV64: { alg = "fnv64a"; INTERNAL_LOOP_BEGIN uint64_t hash = GU_FNV64_SEED;; gu_fnv64a_internal (ptr + off, len, &hash); h = hash; INTERNAL_LOOP_END break; } case FNV128: { alg = "fnv128"; INTERNAL_LOOP_BEGIN gu_uint128_t hash = GU_FNV128_SEED; gu_fnv128a_internal (ptr + off, len, &hash); #if defined(__SIZEOF_INT128__) h = hash; #else h = hash.u32[GU_32LO]; #endif INTERNAL_LOOP_END break; } case MMH32: { alg = "mmh32"; INTERNAL_LOOP_BEGIN h = gu_mmh32 (ptr + off, len); INTERNAL_LOOP_END break; } case MMH128: { alg = "mmh128"; INTERNAL_LOOP_BEGIN gu_uint128_t hash; gu_mmh128 (ptr + off, len, &hash); #if defined(__SIZEOF_INT128__) h = hash; #else h = hash.u32[GU_32LO]; #endif INTERNAL_LOOP_END break; } case SPOOKYS: { alg = "SpookyS"; INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_spooky_short (ptr + off, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case SPOOKY: { alg = "Spooky"; INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_spooky_inline (ptr + off, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case MD5SSL: { alg = "md5ssl"; INTERNAL_LOOP_BEGIN unsigned char md[MD5_DIGEST_LENGTH]; MD5 (ptr + off, len, md); INTERNAL_LOOP_END break; } case MD5CPP: { alg = "md5cpp"; INTERNAL_LOOP_BEGIN unsigned char md[16]; CryptoPP::Weak::MD5().CalculateDigest(md, ptr + off, len); INTERNAL_LOOP_END break; } case FAST128: { alg = "fast128"; 
INTERNAL_LOOP_BEGIN uint64_t hash[2]; gu_fast_hash128 (ptr + off, len, hash); h = hash[0]; INTERNAL_LOOP_END break; } case TABLE: { alg = "table"; INTERNAL_LOOP_BEGIN h = gu_table_hash (ptr + off, len); INTERNAL_LOOP_END break; } } EXTERNAL_LOOP_END gettimeofday (&tv, NULL); end = (double)tv.tv_sec + 1.e-6 * tv.tv_usec; end -= begin; return printf ("%s: %zu loops, %6.3f seconds, %8.3f Mb/sec%s\n", alg, loops, end, ((double)loops * len)/end/1024/1024, h ? "" : " "); } int main (int argc, char* argv[]) { ssize_t buf_size = (1<<20); // 1Mb long long loops = 1000; if (argc > 1) buf_size = strtoll (argv[1], NULL, 10); if (argc > 2) loops = strtoll (argv[2], NULL, 10); /* initialization of data buffer */ ssize_t buf_size_int = buf_size / sizeof(int) + 1; int* buf = (int*) malloc (buf_size_int * sizeof(int)); if (!buf) return ENOMEM; while (buf_size_int) buf[--buf_size_int] = rand(); gu_crc32c_configure(); // compute lookup tables #ifndef GU_CRC32C_NO_HARDWARE gu_crc32c_func = gu_crc32c_hardware(); // try hardware CRC32C if (gu_crc32c_func) timer(buf, buf_size, loops, CRC32hw); gu_crc32c_func = gu_crc32c_slicing_by_8; // force software CRC32C #endif timer (buf, buf_size, loops, CRC32sw); timer (buf, buf_size, loops, FNV32); timer (buf, buf_size, loops, FNV64); timer (buf, buf_size, loops, FNV128); timer (buf, buf_size, loops, MMH32); timer (buf, buf_size, loops, MMH128); // timer (buf, buf_size, loops, SPOOKYS); timer (buf, buf_size, loops, SPOOKY); // timer (buf, buf_size, loops, MD5SSL); timer (buf, buf_size, loops, MD5CPP); timer (buf, buf_size, loops, FAST128); timer (buf, buf_size, loops, TABLE); return 0; } galera-4-26.4.25/galerautils/src/gu_spooky.c000644 000164 177776 00000000440 15107057155 022000 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /** * @file external Spooky hash implementation to avoid code bloat * * $Id$ */ #include "gu_spooky.h" void gu_spooky128_host (const void* const msg, size_t const len, uint64_t* res) { 
gu_spooky_inline (msg, len, res); } galera-4-26.4.25/galerautils/src/gu_asio_ssl.hpp000644 000164 177776 00000001104 15107057155 022633 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @file gu_asio_ssl.hpp * * Common helpers for SSL operations. */ #ifndef GU_ASIO_SSL_HPP #define GU_ASIO_SSL_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL #ifdef GALERA_HAVE_SSL bool exclude_ssl_error(const asio::error_code& ec); std::string extra_error_info(const asio::error_code& ec); #else // GALERA_HAVE_SSL static inline std::string extra_error_info(const asio::error_code&) { return ""; } #endif // GALERA_HAVE_SSL #endif // GU_ASIO_SSL_HPP galera-4-26.4.25/galerautils/src/gu_spooky.h000644 000164 177776 00000033404 15107057155 022013 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2017 Codership Oy /*! * @file Spooky hash by Bob Jenkins: * http://www.burtleburtle.net/bob/c/spooky.h * * Original author comments preserved in C++ style. * Original code is public domain * * $Id$ */ #ifndef _gu_spooky_h_ #define _gu_spooky_h_ #include "gu_types.h" #include "gu_byteswap.h" #ifdef __cplusplus extern "C" { #endif #include // for memcpy() /*! GCC complains about 'initializer element is not constant', hence macros */ #define _spooky_numVars 12 #define _spooky_blockSize 96 /* (_spooky_numVars * 8) */ #define _spooky_bufSize 192 /* (_spooky_blockSize * 2) */ static uint64_t const _spooky_const = GU_ULONG_LONG(0xDEADBEEFDEADBEEF); // // This is used if the input is 96 bytes long or longer. // // The internal state is fully overwritten every 96 bytes. 
// Every input bit appears to cause at least 128 bits of entropy // before 96 other bytes are combined, when run forward or backward // For every input bit, // Two inputs differing in just that input bit // Where "differ" means xor or subtraction // And the base value is random // When run forward or backwards one Mix // I tried 3 pairs of each; they all differed by at least 212 bits. // static GU_FORCE_INLINE void _spooky_mix( const uint64_t *data, uint64_t* s0, uint64_t* s1, uint64_t* s2, uint64_t* s3, uint64_t* s4, uint64_t* s5, uint64_t* s6, uint64_t* s7, uint64_t* s8, uint64_t* s9, uint64_t* sA, uint64_t* sB) { *s0 += gu_le64(data[0]); *s2 ^= *sA; *sB ^= *s0; *s0 =GU_ROTL64(*s0,11); *sB += *s1; *s1 += gu_le64(data[1]); *s3 ^= *sB; *s0 ^= *s1; *s1 =GU_ROTL64(*s1,32); *s0 += *s2; *s2 += gu_le64(data[2]); *s4 ^= *s0; *s1 ^= *s2; *s2 =GU_ROTL64(*s2,43); *s1 += *s3; *s3 += gu_le64(data[3]); *s5 ^= *s1; *s2 ^= *s3; *s3 =GU_ROTL64(*s3,31); *s2 += *s4; *s4 += gu_le64(data[4]); *s6 ^= *s2; *s3 ^= *s4; *s4 =GU_ROTL64(*s4,17); *s3 += *s5; *s5 += gu_le64(data[5]); *s7 ^= *s3; *s4 ^= *s5; *s5 =GU_ROTL64(*s5,28); *s4 += *s6; *s6 += gu_le64(data[6]); *s8 ^= *s4; *s5 ^= *s6; *s6 =GU_ROTL64(*s6,39); *s5 += *s7; *s7 += gu_le64(data[7]); *s9 ^= *s5; *s6 ^= *s7; *s7 =GU_ROTL64(*s7,57); *s6 += *s8; *s8 += gu_le64(data[8]); *sA ^= *s6; *s7 ^= *s8; *s8 =GU_ROTL64(*s8,55); *s7 += *s9; *s9 += gu_le64(data[9]); *sB ^= *s7; *s8 ^= *s9; *s9 =GU_ROTL64(*s9,54); *s8 += *sA; *sA += gu_le64(data[10]); *s0 ^= *s8; *s9 ^= *sA; *sA =GU_ROTL64(*sA,22); *s9 += *sB; *sB += gu_le64(data[11]); *s1 ^= *s9; *sA ^= *sB; *sB =GU_ROTL64(*sB,46); *sA += *s0; } // // Mix all 12 inputs together so that h0, h1 are a hash of them all. 
// // For two inputs differing in just the input bits // Where "differ" means xor or subtraction // And the base value is random, or a counting value starting at that bit // The final result will have each bit of h0, h1 flip // For every input bit, // with probability 50 +- .3% // For every pair of input bits, // with probability 50 +- 3% // // This does not rely on the last Mix() call having already mixed some. // Two iterations was almost good enough for a 64-bit result, but a // 128-bit result is reported, so End() does three iterations. // static GU_FORCE_INLINE void _spooky_end_part( uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3, uint64_t* h4, uint64_t* h5, uint64_t* h6, uint64_t* h7, uint64_t* h8, uint64_t* h9, uint64_t* h10,uint64_t* h11) { *h11+= *h1; *h2 ^= *h11; *h1 = GU_ROTL64(*h1,44); *h0 += *h2; *h3 ^= *h0; *h2 = GU_ROTL64(*h2,15); *h1 += *h3; *h4 ^= *h1; *h3 = GU_ROTL64(*h3,34); *h2 += *h4; *h5 ^= *h2; *h4 = GU_ROTL64(*h4,21); *h3 += *h5; *h6 ^= *h3; *h5 = GU_ROTL64(*h5,38); *h4 += *h6; *h7 ^= *h4; *h6 = GU_ROTL64(*h6,33); *h5 += *h7; *h8 ^= *h5; *h7 = GU_ROTL64(*h7,10); *h6 += *h8; *h9 ^= *h6; *h8 = GU_ROTL64(*h8,13); *h7 += *h9; *h10^= *h7; *h9 = GU_ROTL64(*h9,38); *h8 += *h10; *h11^= *h8; *h10= GU_ROTL64(*h10,53); *h9 += *h11; *h0 ^= *h9; *h11= GU_ROTL64(*h11,42); *h10+= *h0; *h1 ^= *h10; *h0 = GU_ROTL64(*h0,54); } static GU_FORCE_INLINE void _spooky_end( uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3, uint64_t* h4, uint64_t* h5, uint64_t* h6, uint64_t* h7, uint64_t* h8, uint64_t* h9, uint64_t* h10,uint64_t* h11) { #if 0 _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); #endif int i; for (i = 0; i < 3; i++) { _spooky_end_part(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11); } } // // The goal is for each bit of the input to expand into 128 bits of // apparent entropy before it is fully overwritten. 
// n trials both set and cleared at least m bits of h0 h1 h2 h3 // n: 2 m: 29 // n: 3 m: 46 // n: 4 m: 57 // n: 5 m: 107 // n: 6 m: 146 // n: 7 m: 152 // when run forwards or backwards // for all 1-bit and 2-bit diffs // with diffs defined by either xor or subtraction // with a base of all zeros plus a counter, or plus another bit, or random // static GU_FORCE_INLINE void _spooky_short_mix(uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3) { *h2 = GU_ROTL64(*h2,50); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,52); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,30); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,41); *h1 += *h2; *h3 ^= *h1; *h2 = GU_ROTL64(*h2,54); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,48); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,38); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,37); *h1 += *h2; *h3 ^= *h1; *h2 = GU_ROTL64(*h2,62); *h2 += *h3; *h0 ^= *h2; *h3 = GU_ROTL64(*h3,34); *h3 += *h0; *h1 ^= *h3; *h0 = GU_ROTL64(*h0,5); *h0 += *h1; *h2 ^= *h0; *h1 = GU_ROTL64(*h1,36); *h1 += *h2; *h3 ^= *h1; } // // Mix all 4 inputs together so that h0, h1 are a hash of them all. 
// // For two inputs differing in just the input bits // Where "differ" means xor or subtraction // And the base value is random, or a counting value starting at that bit // The final result will have each bit of h0, h1 flip // For every input bit, // with probability 50 +- .3% (it is probably better than that) // For every pair of input bits, // with probability 50 +- .75% (the worst case is approximately that) // static GU_FORCE_INLINE void _spooky_short_end(uint64_t* h0, uint64_t* h1, uint64_t* h2, uint64_t* h3) { *h3 ^= *h2; *h2 = GU_ROTL64(*h2,15); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,52); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,26); *h1 += *h0; *h2 ^= *h1; *h1 = GU_ROTL64(*h1,51); *h2 += *h1; *h3 ^= *h2; *h2 = GU_ROTL64(*h2,28); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,9); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,47); *h1 += *h0; *h2 ^= *h1; *h1 = GU_ROTL64(*h1,54); *h2 += *h1; *h3 ^= *h2; *h2 = GU_ROTL64(*h2,32); *h3 += *h2; *h0 ^= *h3; *h3 = GU_ROTL64(*h3,25); *h0 += *h3; *h1 ^= *h0; *h0 = GU_ROTL64(*h0,63); *h1 += *h0; } /* 0x3/0x7 on 32/64-bit platforms respectively */ #define GU_SPOOKY_ALIGNMENT_MASK (GU_WORD_BYTES - 1) // // short hash ... 
it could be used on any message, // but it's used by Spooky just for short messages ( <= _spooky_bufSize ) // static GU_INLINE void gu_spooky_short_host( const void* const message, size_t const length, uint64_t* const hash) { union { const uint8_t* p8; uint32_t* p32; uint64_t* p64; uintptr_t i; } u; u.p8 = (const uint8_t *)message; #ifndef GU_ALLOW_UNALIGNED_READS uint64_t buf[_spooky_numVars << 1]; if (u.i & GU_SPOOKY_ALIGNMENT_MASK) { /* message unaligned, make aligned copy and point to it */ memcpy(buf, message, length); u.p64 = buf; } #endif /* GU_ALLOW_UNALIGNED_READS */ size_t remainder = length & 0x1F; /* length%32 */ /* author version : */ // uint64_t a = gu_le64(*hash[0]); // uint64_t b = gu_le64(*hash[1]); /* consistent seed version: */ uint64_t a = 0; uint64_t b = 0; uint64_t c = _spooky_const; uint64_t d = _spooky_const; if (length > 15) { const uint64_t *end = u.p64 + ((length >> 5) << 2); /* (length/32)*4 */ // handle all complete sets of 32 bytes for (; u.p64 < end; u.p64 += 4) { c += gu_le64(u.p64[0]); d += gu_le64(u.p64[1]); _spooky_short_mix(&a, &b, &c, &d); a += gu_le64(u.p64[2]); b += gu_le64(u.p64[3]); } //Handle the case of 16+ remaining bytes. 
if (remainder >= 16) { c += gu_le64(u.p64[0]); d += gu_le64(u.p64[1]); _spooky_short_mix(&a, &b, &c, &d); u.p64 += 2; remainder -= 16; } } // Handle the last 0..15 bytes, and its length d = ((uint64_t)length) << 56; switch (remainder) { case 15: d += ((uint64_t)u.p8[14]) << 48; // fall through case 14: d += ((uint64_t)u.p8[13]) << 40; // fall through case 13: d += ((uint64_t)u.p8[12]) << 32; // fall through case 12: d += gu_le32(u.p32[2]); c += gu_le64(u.p64[0]); break; case 11: d += ((uint64_t)u.p8[10]) << 16; // fall through case 10: d += ((uint64_t)u.p8[9]) << 8; // fall through case 9: d += (uint64_t)u.p8[8]; // fall through case 8: c += gu_le64(u.p64[0]); break; case 7: c += ((uint64_t)u.p8[6]) << 48; // fall through case 6: c += ((uint64_t)u.p8[5]) << 40; // fall through case 5: c += ((uint64_t)u.p8[4]) << 32; // fall through case 4: c += gu_le32(u.p32[0]); break; case 3: c += ((uint64_t)u.p8[2]) << 16; // fall through case 2: c += ((uint64_t)u.p8[1]) << 8; // fall through case 1: c += (uint64_t)u.p8[0]; break; case 0: c += _spooky_const; d += _spooky_const; } _spooky_short_end(&a, &b, &c, &d); // @note - in native-endian order! 
hash[0] = a; hash[1] = b; } static GU_FORCE_INLINE void gu_spooky_short( const void* const message, size_t const length, uint64_t* const hash) { uint64_t* const u64 = (uint64_t*)hash; gu_spooky_short_host(message, length, u64); u64[0] = gu_le64(u64[0]); u64[1] = gu_le64(u64[1]); } // do the whole hash in one call static GU_INLINE void gu_spooky_inline ( const void* const message, size_t const length, uint64_t* const hash) { #ifdef GU_USE_SPOOKY_SHORT if (length < _spooky_bufSize) { gu_spooky_short_base (message, length, hash); return; } #endif /* GU_USE_SPOOKY_SHORT */ uint64_t h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11; uint64_t buf[_spooky_numVars]; uint64_t* end; union { const uint8_t* p8; uint64_t* p64; uintptr_t i; } u; size_t remainder; /* this is how the author wants it: a possibility for different seeds h0=h3=h6=h9 = gu_le64(hash[0]); h1=h4=h7=h10 = gu_le64(hash[1]); * this is how we want it - constant seed */ h0=h3=h6=h9 = 0; h1=h4=h7=h10 = 0; h2=h5=h8=h11 = _spooky_const; u.p8 = (const uint8_t*) message; end = u.p64 + (length/_spooky_blockSize)*_spooky_numVars; // handle all whole _spooky_blockSize blocks of bytes #ifndef GU_ALLOW_UNALIGNED_READS if ((u.i & GU_SPOOKY_ALIGNMENT_MASK) == 0) { #endif /* GU_ALLOW_UNALIGNED_READS */ while (u.p64 < end) { _spooky_mix(u.p64, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); u.p64 += _spooky_numVars; } #ifndef GU_ALLOW_UNALIGNED_READS } else { while (u.p64 < end) { memcpy(buf, u.p64, _spooky_blockSize); _spooky_mix(buf, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); u.p64 += _spooky_numVars; } } #endif /* GU_ALLOW_UNALIGNED_READS */ // handle the last partial block of _spooky_blockSize bytes remainder = (length - ((const uint8_t*)end - (const uint8_t*)message)); memcpy(buf, end, remainder); memset(((uint8_t*)buf) + remainder, 0, _spooky_blockSize - remainder); ((uint8_t*)buf)[_spooky_blockSize - 1] = remainder; _spooky_mix(buf, &h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); // do some final mixing 
_spooky_end(&h0,&h1,&h2,&h3,&h4,&h5,&h6,&h7,&h8,&h9,&h10,&h11); /*! @note: in native order */ hash[0] = h0; hash[1] = h1; } /* As is apparent from the gu_spooky_inline(), Spooky hash is enormous. * Since it has advantage only on long messages, it makes sense to make it * a regular function to avoid code bloat. * WARNING: does not do final endian conversion! */ extern void gu_spooky128_host (const void* const msg, size_t const len, uint64_t* res); /* returns hash in the canonical byte order, as a byte array */ static GU_FORCE_INLINE void gu_spooky128 (const void* const msg, size_t const len, void* const res) { uint64_t* const r = (uint64_t*)res; gu_spooky128_host (msg, len, r); r[0] = gu_le64(r[0]); r[1] = gu_le64(r[1]); } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint64_t gu_spooky64 (const void* const msg, size_t const len) { uint64_t res[2]; gu_spooky128_host (msg, len, res); return res[0]; } /* returns hash as an integer, in host byte-order */ static GU_FORCE_INLINE uint32_t gu_spooky32 (const void* const msg, size_t const len) { uint64_t res[2]; gu_spooky128_host (msg, len, res); return (uint32_t)res[0]; } #ifdef __cplusplus } #endif #endif /* _gu_spooky_h_ */ galera-4-26.4.25/galerautils/src/gu_regex.hpp000644 000164 177776 00000003444 15107057155 022142 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009 Codership Oy /** * @file Regular expressions parser based on POSIX regex functions in * * $Id$ */ #ifndef _gu_regex_hpp_ #define _gu_regex_hpp_ #include #include #include #include "gu_throw.hpp" namespace gu { class RegEx { regex_t regex; std::string strerror (int rc) const; public: /*! * @param expr regular expression string */ RegEx (const std::string& expr) : regex() { int rc; if ((rc = regcomp(®ex, expr.c_str(), REG_EXTENDED)) != 0) { gu_throw_fatal << "regcomp(" << expr << "): " << strerror(rc); } } ~RegEx () { regfree (®ex); } /*! * This class is to differentiate between an empty and unset strings. 
* @todo: find a proper name for it and move to gu_utils.hpp */ class Match { std::string value; bool set; public: Match() : value(), set(false) {} Match(const std::string& s) : value(s), set(true) {} // throws NotSet const std::string& str() const { if (set) return value; throw NotSet(); } bool is_set() const { return set; } }; /*! * @brief Matches given string * * @param str string to match with expression * @param num number of matches to return * * @return vector of matched substrings */ std::vector match (const std::string& str, size_t num) const; }; } #endif /* _gu_regex_hpp_ */ galera-4-26.4.25/galerautils/src/gu_logger.cpp000644 000164 177776 00000006575 15107057155 022312 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * This code is based on an excellent article at Dr.Dobb's: * http://www.ddj.com/cpp/201804215?pgno=1 */ #include #include #include #include #include #include "gu_logger.hpp" #include "gu_string_utils.hpp" // strsplit #include #include #include using std::string; using std::vector; using std::set; namespace gu { class DebugFilter { set filter; public: DebugFilter() : filter() { if (::getenv("LOGGER_DEBUG_FILTER")) { set_filter(::getenv("LOGGER_DEBUG_FILTER")); } } ~DebugFilter() {} void set_filter(const string& str) { vector dvec = gu::strsplit(str, ','); for (vector::const_iterator i = dvec.begin(); i != dvec.end(); ++i) { filter.insert(*i); } } size_t size() const { return filter.size(); } bool is_set(const string& str) const { return filter.find(str) != filter.end() || filter.find(str.substr(0, str.find_first_of(":"))) != filter.end(); } }; static DebugFilter debug_filter; void Logger::set_debug_filter(const string& str) { debug_filter.set_filter(str); } bool Logger::no_debug(const string& file, const string& func, const int line) { return debug_filter.size() > 0 && debug_filter.is_set(func) == false; } #ifndef _gu_log_h_ void Logger::enable_tstamp (bool yes) { do_timestamp = yes; } void Logger::enable_debug (bool 
yes) { if (yes) { max_level = LOG_DEBUG; } else { max_level = LOG_INFO; } } void Logger::default_logger (int lvl, const char* msg) { fputs (msg, stderr); fputc ('\n', stderr); fflush (stderr); } void Logger::set_logger (LogCallback cb) { if (0 == cb) { logger = default_logger; } else { logger = cb; } } static const char* level_str[LOG_MAX] = { "FATAL: ", "ERROR: ", " WARN: ", " INFO: ", "DEBUG: " }; bool Logger::do_timestamp = false; LogLevel Logger::max_level = LOG_INFO; LogCallback Logger::logger = default_logger; #else #define do_timestamp gu_log_self_tstamp == true #define level_str gu_log_level_str #endif // _gu_log_h_ void Logger::prepare_default() { if (do_timestamp) { using namespace std; struct tm date; struct timeval time; gettimeofday (&time, NULL); localtime_r (&time.tv_sec, &date); os << date.tm_year + 1900 << '-' << setw(2) << setfill('0') << (date.tm_mon + 1) << '-' << setw(2) << setfill('0') << date.tm_mday << ' ' << setw(2) << setfill('0') << date.tm_hour << ':' << setw(2) << setfill('0') << date.tm_min << ':' << setw(2) << setfill('0') << date.tm_sec << '.' << setw(3) << setfill('0') << (time.tv_usec / 1000) << ' '; } os << level_str[level]; } } galera-4-26.4.25/galerautils/src/gu_digest.hpp000644 000164 177776 00000012657 15107057155 022315 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file Message digest interface. * * $Id$ */ #ifndef GU_DIGEST_HPP #define GU_DIGEST_HPP #include "gu_hash.h" #include "gu_byteswap.hpp" #include "gu_serializable.hpp" #include "gu_macros.hpp" namespace gu { /* Just making MMH3 not derive from Digest reduced TrxHandle size from * 4560 bytes to 4256. 304 bytes of vtable pointers... 
*/ class MMH3 { public: MMH3 () : ctx_() { gu_mmh128_init (&ctx_); } ~MMH3 () {} template static int digest (const void* const in, size_t const size, T& out) { byte_t tmp[16]; gu_mmh128(in, size, tmp); int const s(std::min(sizeof(T), sizeof(tmp))); ::memcpy (&out, tmp, s); return s; } /* experimental */ template static T digest (const void* const in, size_t const size) { switch (sizeof(T)) { case 1: return gu_mmh128_32(in, size); case 2: return gu_mmh128_32(in, size); case 4: return gu_mmh128_32(in, size); case 8: return gu_mmh128_64(in, size); } throw; } void append (const void* const buf, size_t const size) { gu_mmh128_append (&ctx_, buf, size); } template int gather (void* const buf) const { GU_COMPILE_ASSERT(size >= 16, wrong_buf_size); gather16 (buf); return 16; } int gather (void* const buf, size_t const size) const { byte_t tmp[16]; gather16(tmp); int const s(std::min(size, sizeof(tmp))); ::memcpy (buf, tmp, s); return s; } void gather16 (void* const buf) const { gu_mmh128_get (&ctx_, buf); } uint64_t gather8() const { return gu_mmh128_get64 (&ctx_); } uint32_t gather4() const { return gu_mmh128_get32 (&ctx_); } // a questionable feature template int operator() (T& out) const { return gather(&out); } private: gu_mmh128_ctx_t ctx_; }; /* class MMH3 */ template <> inline int MMH3::digest (const void* const in, size_t const size, uint8_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t const size, uint16_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t const size, uint32_t& out) { out = gu_mmh128_32(in, size); return sizeof(out); } template <> inline int MMH3::digest (const void* const in, size_t const size, uint64_t& out) { out = gu_mmh128_64(in, size); return sizeof(out); } template <> inline int MMH3::gather<8> (void* const out) const { *(static_cast(out)) = gather8(); return 8; } template <> inline int 
MMH3::gather<4> (void* const out) const { *(static_cast(out)) = gather4(); return 4; } typedef MMH3 Hash; class FastHash { public: template static int digest (const void* const in, size_t const size, T& out) { byte_t tmp[16]; gu_fast_hash128(in, size, tmp); int const s(std::min(sizeof(T), sizeof(tmp))); ::memcpy (&out, tmp, s); return s; } /* experimental */ template static T digest (const void* const in, size_t const size); /* The above is undefined and should cause linking error in case that * template gets instantiated instead of specialized ones below. * Unfortunately GU_COMPILE_ASSERT() is unusable here - causes compilation * errors in every unit that only includes this header (probably because * method is static). * Perhaps templating the class would have done the trick */ }; /* FastHash */ template <> inline int FastHash::digest (const void* const in, size_t const size, uint8_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t const size, uint16_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t const size, uint32_t& out) { out = gu_fast_hash32(in, size); return sizeof(out); } template <> inline int FastHash::digest (const void* const in, size_t const size, uint64_t& out) { out = gu_fast_hash64(in, size); return sizeof(out); } template <> inline uint8_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash32(in, size); } template <> inline uint16_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash32(in, size); } template <> inline uint32_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash32(in, size); } template <> inline uint64_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash64(in, size); } template <> inline int8_t FastHash::digest(const void* const in, size_t const size) { 
return gu_fast_hash32(in, size); } template <> inline int16_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash32(in, size); } template <> inline int32_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash32(in, size); } template <> inline int64_t FastHash::digest(const void* const in, size_t const size) { return gu_fast_hash64(in, size); } } /* namespace gu */ #endif /* GU_DIGEST_HPP */ galera-4-26.4.25/galerautils/src/gu_mmap.hpp000644 000164 177776 00000001156 15107057155 021760 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #ifndef __GCACHE_MMAP__ #define __GCACHE_MMAP__ #include "gu_fdesc.hpp" namespace gu { class MMap { public: size_t const size; void* const ptr; MMap (const FileDescriptor& fd, bool sequential = false); ~MMap (); void dont_need() const; void sync(void *addr, size_t length) const; void sync() const; void unmap(); private: bool mapped; // This class is definitely non-copyable MMap (const MMap&); MMap& operator = (const MMap); }; } /* namespace gu */ #endif /* __GCACHE_MMAP__ */ galera-4-26.4.25/galerautils/src/gu_buffer.hpp000644 000164 177776 00000004420 15107057155 022274 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy */ /*! * Byte buffer class. This is thin wrapper to std::vector */ #ifndef GU_BUFFER_HPP #define GU_BUFFER_HPP #include "gu_types.hpp" // for gu::byte_t #include "gu_shared_ptr.hpp" #include #include namespace gu { /* * Utility class for data buffers with vector like interface. * * Additionally provides data() method to access underlying * data array. The call to data() is always valid, even if * the buffer is empty. 
*/ class Buffer { public: typedef std::vector buffer_type; typedef buffer_type::iterator iterator; typedef buffer_type::const_iterator const_iterator; typedef buffer_type::difference_type difference_type; Buffer() : buf_() { } Buffer(size_t size) : buf_(size) { } template Buffer(InputIt first, InputIt last) : buf_(first, last) { } iterator begin() { return buf_.begin(); } iterator end() { return buf_.end(); } const_iterator begin() const { return buf_.begin(); } const_iterator end() const { return buf_.end(); } void insert(iterator pos, byte_t value) { buf_.insert(pos, value); } template void insert(iterator pos, InputIt first, InputIt last) { buf_.insert(pos, first, last); } byte_t& operator[](size_t i) { assert(i < buf_.size()); return buf_[i]; } const byte_t& operator[](size_t i) const { assert(i < buf_.size()); return buf_[i]; } const byte_t* data() const { return (empty() ? 0 : &buf_[0]); } void resize(size_t size) { buf_.resize(size); } void reserve(size_t size) { buf_.reserve(size); } void clear() { buf_.clear(); } bool empty() const { return buf_.empty(); } size_t size() const { return buf_.size(); } bool operator==(const Buffer& other) const { return (buf_ == other.buf_); } private: std::vector buf_; }; typedef gu::shared_ptr::type SharedBuffer; } #endif // GU_BUFFER_HPP galera-4-26.4.25/galerautils/src/gu_fdesc.hpp000644 000164 177776 00000002542 15107057155 022112 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2016 Codership Oy * * $Id$ */ #ifndef __GU_FDESC_HPP__ #define __GU_FDESC_HPP__ #include "gu_exception.hpp" #include "gu_types.hpp" // for off_t, byte_t #include namespace gu { class FileDescriptor { public: /* open existing file */ FileDescriptor (const std::string& fname, bool sync = true); /* (re)create file */ FileDescriptor (const std::string& fname, size_t length, bool allocate = true, bool sync = true); ~FileDescriptor (); int get() const { return fd_; } const std::string& name() const { return name_; } off_t size() const { 
return size_; } void sync() const; void unlink() const { ::unlink (name_.c_str()); } private: std::string const name_; int const fd_; off_t const size_; bool const sync_; // sync on close bool write_byte (off_t offset); void write_file (off_t start = 0); void prealloc (off_t start = 0); void constructor_common(); FileDescriptor (const FileDescriptor&); FileDescriptor& operator = (const FileDescriptor); }; } /* namespace gu */ #endif /* __GU_FDESC_HPP__ */ galera-4-26.4.25/galerautils/src/gu_asio_stream_engine.hpp000644 000164 177776 00000006767 15107057155 024676 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // #ifndef GU_ASIO_STREAM_ENGINE_HPP #define GU_ASIO_STREAM_ENGINE_HPP /** @file gu_asio_stream_engine.hpp * * Interface definition for reactive stream processing engine. */ #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL #include "gu_asio.hpp" #include "wsrep_tls_service.h" #include #include namespace gu { class AsioIoService; // Stream processing engine interface. class AsioStreamEngine { public: enum op_status { /** Operation completed successfully. */ success = 0, /** * Operation completed successfully, but the stream * processing engine wants to read more. */ want_read, /** * Operation completed successfully, but the stream * processing engine wants to write more. */ want_write, /** * Stream end of file was encountered. */ eof, /** * Error was encountered. */ error }; struct op_result { /** Status code of the operation or negative error number. */ op_status status; /** Bytes transferred from/to given buffer during the operation. */ size_t bytes_transferred; }; virtual ~AsioStreamEngine() { } AsioStreamEngine(const AsioStreamEngine&) = delete; AsioStreamEngine& operator=(const AsioStreamEngine&) = delete; /** * Return scheme string corresponding to underlying engine. 
*/ virtual std::string scheme() const = 0; /** * Used to assign file descriptor to engines which were * dependency injected when AsioStreamReact was constructured. * This should be never called for engines which were created * internally, did not override assign_fd() and fd was provided * during construction. Keeping assert to detect violations of * this convention. */ virtual void assign_fd(int fd) { assert(0); } virtual enum op_status client_handshake() = 0; virtual enum op_status server_handshake() = 0; /** * Shut down the stream processing engine. This must however * not close the file descriptor passed on construction. */ virtual void shutdown() = 0; /** * * @param buf Buffer to read into. * @param max_count Maximum number of bytes to read into buf. * * @return op_result. */ virtual op_result read(void* buf, size_t max_count) = 0; /** * Write buffer. * */ virtual op_result write(const void* buf, size_t count) = 0; /** * Return last error code. */ virtual AsioErrorCode last_error() const = 0; /** * Make a new AsioStreamEngine. * * @param scheme Desired scheme for stream engine. * @param fd File descriptor associated to the stream. 
*/ static std::shared_ptr make( AsioIoService&, const std::string& scheme, int fd, bool non_blocking); protected: AsioStreamEngine() { } }; std::ostream& operator<<(std::ostream&, enum AsioStreamEngine::op_status); } #endif // GU_ASIO_STREAM_ENGINE_HPP galera-4-26.4.25/galerautils/src/gu_crc.hpp000644 000164 177776 00000001316 15107057155 021573 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013 Codership Oy * * @file header for various CRC stuff * * $Id$ */ #ifndef GU_CRC_HPP #define GU_CRC_HPP #include "gu_crc32c.h" namespace gu { class CRC32C { public: CRC32C() : state_(GU_CRC32C_INIT) {} void append(const void* const data, size_t const size) { gu_crc32c_append (&state_, data, size); } uint32_t get() const { return gu_crc32c_get(state_); } uint32_t operator() () const { return get(); } static uint32_t digest(const void* const data, size_t const size) { return gu_crc32c(data, size); } private: gu_crc32c_t state_; }; /* class CRC32C */ } /* namespace gu */ #endif /* GU_CRC_HPP */ galera-4-26.4.25/galerautils/src/gu_macros.h000644 000164 177776 00000003674 15107057155 021761 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2013 Codership Oy /** * @file Miscellaneous macros * * $Id$ */ #ifndef _gu_macros_h_ #define _gu_macros_h_ /* * Platform-dependent macros */ #if defined(_MSC_VER) # define GU_NORETURN __declspec(noreturn) # define GU_INLINE __forceinline # define GU_FORCE_INLINE __forceinline # define GU_UNUSED # define GU_LONG(x) (x) # define GU_ULONG(x) (x) # define GU_LONG_LONG(x) (x) # define GU_ULONG_LONG(x) (x) # define GU_DEBUG_NORETURN #else /* !defined(_MSC_VER) */ # define GU_NORETURN __attribute__((noreturn)) # define GU_INLINE inline # define GU_FORCE_INLINE inline __attribute__((always_inline)) # define GU_UNUSED __attribute__((unused)) # define GU_LONG(x) (x##L) # define GU_ULONG(x) (x##LU) # define GU_LONG_LONG(x) (x##LL) # define GU_ULONG_LONG(x) (x##LLU) # ifndef __OPTIMIZE__ # define GU_DEBUG_NORETURN abort(); # else # define 
GU_DEBUG_NORETURN # endif #endif /* !defined(_MSC_VER) */ /* * End of paltform-dependent macros */ /* "Shamelessly stolen" (tm) goods from Linux kernel */ /* * min()/max() macros that also do * strict type-checking.. See the * "unnecessary" pointer comparison. */ #if 0 // typeof() is not in C99 #define GU_MAX(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x > _y ? _x : _y; }) #define GU_MIN(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x < _y ? _x : _y; }) #endif #define gu_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #if __GNUC__ >= 3 # define gu_likely(x) __builtin_expect((x), 1) # define gu_unlikely(x) __builtin_expect((x), 0) #else # define gu_likely(x) (x) # define gu_unlikely(x) (x) #endif /* returns minimum multiple of A that is >= S */ #define GU_ALIGN(S,A) ((((S) - 1)/(A) + 1)*(A)) #endif /* _gu_macros_h_ */ galera-4-26.4.25/galerautils/src/gu_histogram.cpp000644 000164 177776 00000004204 15107057155 023013 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #include "gu_histogram.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include "gu_string_utils.hpp" // strsplit() #include #include #include #include gu::Histogram::Histogram(const std::string& vals) : cnt_() { std::vector varr = gu::strsplit(vals, ','); for (std::vector::const_iterator i = varr.begin(); i != varr.end(); ++i) { double val; std::istringstream is(*i); is >> val; if (is.fail()) { gu_throw_fatal << "Parse error"; } if (cnt_.insert(std::make_pair(val, 0)).second == false) { gu_throw_fatal << "Failed to insert value: " << val; } } } void gu::Histogram::insert(const double val) { if (val < 0.0) { log_warn << "Negative value (" << val << "), discarding"; return; } // Returns element that has key greater to val, // the correct bin is one below that std::map::iterator i(cnt_.upper_bound(val)); if (i == cnt_.end()) { ++cnt_.rbegin()->second; } else if (i == cnt_.begin()) { log_warn << "value " 
<< val << " below histogram range, discarding"; } else { --i; ++i->second; } } void gu::Histogram::clear() { for (std::map::iterator i = cnt_.begin(); i != cnt_.end(); ++i) { i->second = 0; } } std::ostream& gu::operator<<(std::ostream& os, const Histogram& hs) { std::map::const_iterator i, i_next; long long norm = 0; for (i = hs.cnt_.begin(); i != hs.cnt_.end(); ++i) { norm += i->second; } for (i = hs.cnt_.begin(); i != hs.cnt_.end(); i = i_next) { i_next = i; ++i_next; os << i->first << ":" << std::fabs(double(i->second)/double(norm)); if (i_next != hs.cnt_.end()) os << ","; } return os; } std::string gu::Histogram::to_string() const { std::ostringstream os; os << *this; return os.str(); } galera-4-26.4.25/galerautils/src/gu_disable_non_virtual_dtor.hpp000644 000164 177776 00000001512 15107057155 026075 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // // Note that there are no usual header guards because this header // may have to be included several times for compilation unit. /** * @file gu_disable_non_virtual_dtor.hpp * * This file accompanied with gu_enable_non_virtual_dtor.hpp * can be used to disable/enable -Wnon-virtual-dtor compiler warning * temporarily when it is not desirable to disable the warning completely * for compilation. * * This can be useful when using public inheritance from standard * library classes, especially std::enable_shared_from_this. 
*/ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Wnon-virtual-dtor" #endif galera-4-26.4.25/galerautils/src/gu_crc32c.h000644 000164 177776 00000005121 15107057155 021541 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013-2020 Codership Oy * * @file Interface to CRC-32C implementations * * $Id$ */ #ifndef _GU_CRC32C_H_ #define _GU_CRC32C_H_ #if defined(__cplusplus) extern "C" { #endif #include "gu_macros.h" #include // uint32_t #include // size_t /*! Call this to configure CRC32C to use the best available implementation */ extern void gu_crc32c_configure(); typedef uint32_t gu_crc32c_t; static gu_crc32c_t const GU_CRC32C_INIT = 0xFFFFFFFF; typedef gu_crc32c_t (*gu_crc32c_func_t) (gu_crc32c_t crc, const void* data, size_t length); extern gu_crc32c_func_t gu_crc32c_func; static GU_FORCE_INLINE void gu_crc32c_init (gu_crc32c_t* crc) { *crc = GU_CRC32C_INIT; } static GU_FORCE_INLINE void gu_crc32c_append (gu_crc32c_t* crc, const void* data, size_t size) { *crc = gu_crc32c_func (*crc, data, size); } static GU_FORCE_INLINE uint32_t gu_crc32c_get (gu_crc32c_t crc) { return (~(crc)); } static GU_FORCE_INLINE uint32_t gu_crc32c (const void* data, size_t size) { return (~(gu_crc32c_func (GU_CRC32C_INIT, data, size))); } /* Portable software-only CRC32-C implementations for gu_crc32c_func */ extern gu_crc32c_t gu_crc32c_sarwate (gu_crc32c_t state, const void* data, size_t length); extern gu_crc32c_t gu_crc32c_slicing_by_4(gu_crc32c_t state, const void* data, size_t length); extern gu_crc32c_t gu_crc32c_slicing_by_8(gu_crc32c_t state, const void* data, size_t length); #if !defined(GU_CRC32C_NO_HARDWARE) #if defined(__x86_64) || defined(_M_AMD64) || defined(_M_X64) #define GU_CRC32C_X86_64 #endif #if defined(GU_CRC32C_X86_64) || defined(__i386) || defined(_M_X86) #define GU_CRC32C_X86 #endif #if 
defined(GU_CRC32C_X86) /* x86-based CRC32-C implementations for gu_crc32c_func */ extern gu_crc32c_t gu_crc32c_x86(gu_crc32c_t state, const void* data, size_t length); #if defined(GU_CRC32C_X86_64) extern gu_crc32c_t gu_crc32c_x86_64(gu_crc32c_t state, const void* data, size_t length); #endif /* GU_CRC32C_X86_64 */ #endif /* GU_CRC32C_X86 */ #if defined(__aarch64__) || defined(__AARCH64__) #define GU_CRC32C_ARM64 extern gu_crc32c_t gu_crc32c_arm64(gu_crc32c_t state, const void* data, size_t length); #endif /* __aarch64__ || __AARCH64__ */ #if defined(GU_CRC32C_X86) || defined(GU_CRC32C_ARM64) /** Returns hardware-accelerated CRC32C implementation */ extern gu_crc32c_func_t gu_crc32c_hardware(); #else #define GU_CRC32C_NO_HARDWARE 1 #endif #endif /* !GU_CRC32C_NO_HARDWARE */ #if defined(__cplusplus) } #endif #endif /* _GU_CRC32C_H_ */ galera-4-26.4.25/galerautils/src/gu_lock.hpp000644 000164 177776 00000003167 15107057155 021762 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy * */ #ifndef __GU_LOCK__ #define __GU_LOCK__ #include "gu_exception.hpp" #include "gu_logger.hpp" #include "gu_mutex.hpp" #include "gu_cond.hpp" #include "gu_datetime.hpp" #include #include namespace gu { class Lock { const Mutex& mtx_; Lock (const Lock&); Lock& operator=(const Lock&); public: Lock (const Mutex& mtx) : mtx_(mtx) { mtx_.lock(); } virtual ~Lock () { mtx_.unlock(); } inline void wait (const Cond& cond) { #ifdef GU_MUTEX_DEBUG mtx_.locked_ = false; mtx_.disown(); #endif /* GU_MUTEX_DEBUG */ cond.ref_count++; gu_cond_wait (&(cond.cond), &mtx_.impl()); // never returns error cond.ref_count--; #ifdef GU_MUTEX_DEBUG mtx_.locked_ = true; mtx_.owned_ = gu_thread_self(); #endif /* GU_MUTEX_DEBUG */ } inline void wait (const Cond& cond, const datetime::Date& date) { timespec ts; date._timespec(ts); #ifdef GU_MUTEX_DEBUG mtx_.locked_ = false; mtx_.disown(); #endif /* GU_MUTEX_DEBUG */ cond.ref_count++; int const ret(gu_cond_timedwait (&(cond.cond), 
&mtx_.impl(), &ts)); cond.ref_count--; #ifdef GU_MUTEX_DEBUG mtx_.locked_ = true; mtx_.owned_ = gu_thread_self(); #endif /* GU_MUTEX_DEBUG */ if (gu_unlikely(ret)) gu_throw_system_error(ret); } }; } #endif /* __GU_LOCK__ */ galera-4-26.4.25/galerautils/src/gu_alloc.cpp000644 000164 177776 00000011242 15107057155 022110 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2016 Codership Oy */ /*! * @file allocator main functions * * $Id$ */ #include "gu_alloc.hpp" #include "gu_throw.hpp" #include "gu_assert.hpp" #include "gu_arch.h" #include "gu_limits.h" #include #include // for std::setfill() and std::setw() gu::Allocator::HeapPage::HeapPage (page_size_type const size) : Page (static_cast(::malloc(size)), size) { assert(0 == (uintptr_t(base_ptr_) % GU_WORD_BYTES)); if (0 == base_ptr_) gu_throw_error (ENOMEM); } gu::Allocator::Page* gu::Allocator::HeapStore::my_new_page (page_size_type const size) { if (gu_likely(size <= left_)) { /* to avoid too frequent allocation, make it (at least) 64K */ static page_size_type const PAGE_SIZE(gu_page_size_multiple(1 << 16)); page_size_type const page_size (std::min(std::max(size, PAGE_SIZE), left_)); Page* ret = new HeapPage (page_size); assert (ret != 0); left_ -= page_size; return ret; } gu_throw_error (ENOMEM) << "out of memory in RAM pool"; } gu::Allocator::FilePage::FilePage (const std::string& name, page_size_type const size) : Page (0, 0), fd_ (name, size, false, false), mmap_(fd_, true) { base_ptr_ = static_cast(mmap_.ptr); assert(0 == (uintptr_t(base_ptr_) % GU_WORD_BYTES)); ptr_ = base_ptr_; left_ = mmap_.size; } gu::Allocator::Page* gu::Allocator::FileStore::my_new_page (page_size_type const size) { Page* ret = 0; try { std::ostringstream fname; fname << base_name_ << '.' 
<< std::dec << std::setfill('0') << std::setw(6) << n_; ret = new FilePage(fname.str(), std::max(size, page_size_)); assert (ret != 0); ++n_; } catch (std::exception& e) { gu_throw_error(ENOMEM) << e.what(); } return ret; } #ifdef GU_ALLOCATOR_DEBUG void gu::Allocator::add_current_to_bufs() { page_size_type const current_size (current_page_->size()); if (current_size) { if (bufs_->empty() || bufs_->back().ptr != current_page_->base()) { Buf b = { current_page_->base(), current_size }; bufs_->push_back (b); } else { bufs_->back().size = current_size; } } } size_t gu::Allocator::gather (std::vector& out) const { if (bufs_().size()) out.insert (out.end(), bufs_().begin(), bufs_().end()); Buf b = { current_page_->base(), current_page_->size() }; out.push_back (b); return size_; } #endif /* GU_ALLOCATOR_DEBUG */ gu::byte_t* gu::Allocator::alloc (page_size_type const size, bool& new_page) { new_page = false; if (gu_unlikely(0 == size)) return 0; byte_t* ret = current_page_->alloc (size); if (gu_unlikely(0 == ret)) { Page* np = 0; try { np = current_store_->new_page(size); } catch (Exception& e) { if (current_store_ != &heap_store_) throw; /* no fallbacks left */ /* fallback to disk store */ current_store_ = &file_store_; np = current_store_->new_page(size); } assert (np != 0); // it should have thrown above pages_().push_back (np); #ifdef GU_ALLOCATOR_DEBUG add_current_to_bufs(); #endif /* GU_ALLOCATOR_DEBUG */ current_page_ = np; new_page = true; ret = np->alloc (size); assert (ret != 0); // the page should be sufficiently big } size_ += size; return ret; } gu::Allocator::BaseNameDefault const gu::Allocator::BASE_NAME_DEFAULT; gu::Allocator::Allocator (const BaseName& base_name, void* reserved, page_size_type reserved_size, heap_size_type max_ram, page_size_type disk_page_size) : first_page_ (reserved, reserved_size), current_page_ (&first_page_), heap_store_ (max_ram), file_store_ (base_name, disk_page_size), current_store_(&heap_store_), pages_ (), #ifdef 
GU_ALLOCATOR_DEBUG bufs_ (), #endif /* GU_ALLOCATOR_DEBUG */ size_ (0) { assert (NULL != reserved || 0 == reserved_size); assert (0 == (uintptr_t(reserved) % GU_WORD_BYTES)); assert (current_page_ != 0); pages_->push_back (current_page_); } gu::Allocator::~Allocator () { for (int i(pages_->size() - 1); i > 0 /* don't delete first_page_ - we didn't allocate it */; --i) { delete (pages_[i]); } } galera-4-26.4.25/galerautils/src/gu_debug_sync.cpp000644 000164 177776 00000002373 15107057155 023145 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2014 Codership Oy // #ifdef GU_DBUG_ON #include "gu_debug_sync.hpp" #include "gu_lock.hpp" #include namespace { gu::Mutex sync_mutex; typedef std::multimap SyncMap; SyncMap sync_waiters; } void gu_debug_sync_wait(const std::string& sync) { gu::Lock lock(sync_mutex); gu::Cond cond; log_debug << "enter sync wait '" << sync << "'"; SyncMap::iterator i( sync_waiters.insert(std::make_pair(sync, &cond))); lock.wait(cond); sync_waiters.erase(i); log_debug << "leave sync wait '" << sync << "'"; } void gu_debug_sync_signal(const std::string& sync) { gu::Lock lock(sync_mutex); std::pair range(sync_waiters.equal_range(sync)); for (SyncMap::iterator i(range.first); i != range.second; ++i) { log_debug << "signalling waiter"; i->second->signal(); } } std::string gu_debug_sync_waiters() { std::string ret; gu::Lock lock(sync_mutex); for (SyncMap::iterator i(sync_waiters.begin()); i != sync_waiters.end();) { ret += i->first; ++i; if (i != sync_waiters.end()) ret += " "; } return ret; } #endif // GU_DBUG_ON galera-4-26.4.25/galerautils/src/gu_crc32c_arm64.c000644 000164 177776 00000004604 15107057155 022552 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2020 Codership Oy */ /** * @file Hardware-accelerated implementation of CRC32C algorithm using arm64 * instructions. * * Defines gu_crc32c_hardware() that returns pointer to gu_crc32c_func_t if * available on a given CPU. 
*/ #include "gu_crc32c.h" #if defined(GU_CRC32C_ARM64) #include "gu_log.h" #include #include #include static inline gu_crc32c_t crc32c_arm64_tail7(gu_crc32c_t state, const uint8_t* ptr, size_t len) { assert(len < 8); if (len >= 4) { state = __crc32cw(state, *(uint32_t *)ptr); ptr += 4; len -= 4; } switch (len) { case 3: state = __crc32cb(state, *ptr); ptr++; /* fall through */ case 2: state = __crc32ch(state, *(uint16_t*)ptr); break; case 1: state = __crc32cb(state, *ptr);; } return state; } gu_crc32c_t gu_crc32c_arm64(gu_crc32c_t state, const void* data, size_t len) { static size_t const arg_size = sizeof(uint64_t); const uint8_t* ptr = (const uint8_t*)data; /* apparently no ptr misalignment protection is needed */ while (len >= arg_size) { state = __crc32cd(state, *(uint64_t*)ptr); len -= arg_size; ptr += arg_size; } assert(len < 8); return crc32c_arm64_tail7(state, ptr, len); } #include #if defined(__FreeBSD__) /* Imitate getauxval() interface */ static unsigned long int getauxval(unsigned long int const type) { unsigned long int ret; if (0 != elf_aux_info(type, &ret, sizeof(ret))) ret = 0; return ret; } #endif /* FreeBSD */ #if defined(HWCAP_CRC32) # define GU_AT_HWCAP AT_HWCAP # define GU_HWCAP_CRC32 HWCAP_CRC32 #elif defined(HWCAP2_CRC32) # define GU_AT_HWCAP AT_HWCAP2 # define GU_HWCAP_CRC32 HWCAP2_CRC32 #endif /* HWCAP_CRC32 */ gu_crc32c_func_t gu_crc32c_hardware() { #if defined(GU_AT_HWCAP) unsigned long int const hwcaps = getauxval(GU_AT_HWCAP); if (hwcaps & GU_HWCAP_CRC32) { gu_info ("CRC-32C: using hardware acceleration."); return gu_crc32c_arm64; } else { gu_info ("CRC-32C: hardware does not have CRC-32C capabilities."); return NULL; } #else gu_info ("CRC-32C: compiled without hardware acceleration support."); return NULL; #endif /* GU_AT_HWCAP */ } #endif /* GU_CRC32C_ARM64 */ galera-4-26.4.25/galerautils/src/gu_resolver.cpp000644 000164 177776 00000034416 15107057155 022667 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2013 Codership Oy 
#include "gu_resolver.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include "gu_uri.hpp" #include #include #include // for close() #include #include #include #define BSD_COMP /* For SIOCGIFCONF et al on Solaris */ #include #include #include #if defined(__APPLE__) || defined(__FreeBSD__) # include # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP #else /* !__APPLE__ && !__FreeBSD__ */ extern "C" /* old style cast */ { static int const GU_SIOCGIFCONF = SIOCGIFCONF; static int const GU_SIOCGIFINDEX = SIOCGIFINDEX; } #endif /* !__APPLE__ && !__FreeBSD__ */ //using namespace std; using std::make_pair; // Map from scheme string to addrinfo class SchemeMap { public: typedef std::map Map; typedef Map::const_iterator const_iterator; SchemeMap() : ai_map() { ai_map.insert(make_pair("tcp", get_addrinfo(0, AF_UNSPEC, SOCK_STREAM, 0))); ai_map.insert(make_pair("ssl", get_addrinfo(0, AF_UNSPEC, SOCK_STREAM, 0))); ai_map.insert(make_pair("udp", get_addrinfo(0, AF_UNSPEC, SOCK_DGRAM, 0))); // TODO: } const_iterator find(const std::string& key) const { return ai_map.find(key); } const_iterator end() const { return ai_map.end(); } static const addrinfo* get_addrinfo(const_iterator i) { return &i->second; } private: Map ai_map; struct addrinfo get_addrinfo(int flags, int family, int socktype, int protocol) { struct addrinfo ret = { flags, family, socktype, protocol, #if defined(__FreeBSD__) 0, // FreeBSD gives ENOMEM error with non-zero value #else sizeof(struct sockaddr), #endif 0, 0, 0 }; return ret; } }; static SchemeMap scheme_map; // Helper to copy addrinfo structs. 
static void copy(const addrinfo& from, addrinfo& to) { to.ai_flags = from.ai_flags; to.ai_family = from.ai_family; to.ai_socktype = from.ai_socktype; to.ai_protocol = from.ai_protocol; to.ai_addrlen = from.ai_addrlen; if (from.ai_addr != 0) { if ((to.ai_addr = reinterpret_cast(malloc(to.ai_addrlen))) == 0) { gu_throw_fatal << "out of memory while trying to allocate " << to.ai_addrlen << " bytes"; } memcpy(to.ai_addr, from.ai_addr, to.ai_addrlen); } to.ai_canonname = 0; to.ai_next = 0; } ///////////////////////////////////////////////////////////////////////// // Sockaddr implementation ///////////////////////////////////////////////////////////////////////// bool gu::net::Sockaddr::is_multicast() const { switch (sa_->sa_family) { case AF_INET: return IN_MULTICAST(ntohl(reinterpret_cast(sa_)->sin_addr.s_addr)); case AF_INET6: return IN6_IS_ADDR_MULTICAST(&reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } bool gu::net::Sockaddr::is_anyaddr() const { switch (sa_->sa_family) { case AF_INET: return (ntohl(reinterpret_cast(sa_)->sin_addr.s_addr) == INADDR_ANY); case AF_INET6: return IN6_IS_ADDR_UNSPECIFIED(&reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } bool gu::net::Sockaddr::is_linklocal() const { switch (sa_->sa_family) { case AF_INET6: return IN6_IS_ADDR_LINKLOCAL( &reinterpret_cast(sa_)->sin6_addr); default: assert(0); return false; } } gu::net::Sockaddr::Sockaddr(const sockaddr* sa, socklen_t sa_len) : sa_ (0 ), sa_len_(sa_len) { if ((sa_ = reinterpret_cast(malloc(sa_len_))) == 0) { gu_throw_fatal; } memcpy(sa_, sa, sa_len_); } gu::net::Sockaddr::Sockaddr(const Sockaddr& s) : sa_ (0 ), sa_len_(s.sa_len_) { if ((sa_ = reinterpret_cast(malloc(sa_len_))) == 0) { gu_throw_fatal; } memcpy(sa_, s.sa_, sa_len_); } gu::net::Sockaddr::~Sockaddr() { free(sa_); } ///////////////////////////////////////////////////////////////////////// // MReq implementation ///////////////////////////////////////////////////////////////////////// static 
unsigned int get_ifindex_by_addr(const gu::net::Sockaddr& addr) { if (addr.is_anyaddr() == true) { return 0; } unsigned int idx(-1); int err(0); #if defined(__APPLE__) || defined(__FreeBSD__) struct ifaddrs *if_addrs = NULL; struct ifaddrs *if_addr = NULL; if (getifaddrs (&if_addrs) != 0) { err = errno; goto out; } for (if_addr = if_addrs; if_addr != NULL; if_addr = if_addr->ifa_next) { try { gu::net::Sockaddr sa (if_addr->ifa_addr, sizeof (struct sockaddr)); if (sa.get_family () == addr.get_family () && memcmp (sa.get_addr (), addr.get_addr (), addr.get_addr_len ()) == 0) { idx = if_nametoindex (if_addr->ifa_name); goto out; } } catch (gu::Exception& e) { } } out: # else /* !__APPLE__ && !__FreeBSD__ */ struct ifconf ifc; memset(&ifc, 0, sizeof(struct ifconf)); ifc.ifc_len = 16*sizeof(struct ifreq); std::vector ifr(16); ifc.ifc_req = &ifr[0]; int fd(socket(AF_INET, SOCK_DGRAM, 0)); if (fd == -1) { err = errno; gu_throw_system_error(err) << "could not create socket"; } if ((err = ioctl(fd, GU_SIOCGIFCONF, &ifc)) == -1) { err = errno; goto out; } log_debug << "read: " << ifc.ifc_len; for (size_t i(0); i < ifc.ifc_len/sizeof(struct ifreq); ++i) { struct ifreq* ifrp(&ifr[i]); try { log_debug << "read: " << ifrp->ifr_name; gu::net::Sockaddr sa(&ifrp->ifr_addr, sizeof(struct sockaddr)); if (sa.get_family() == addr.get_family() && memcmp(sa.get_addr(), addr.get_addr(), addr.get_addr_len()) == 0) { if ((err = ioctl(fd, GU_SIOCGIFINDEX, ifrp, sizeof(struct ifreq))) == -1) { err = errno; } #if defined(__linux__) || defined(__GNU__) idx = ifrp->ifr_ifindex; #elif defined(__sun__) || defined(__FreeBSD_kernel__) idx = ifrp->ifr_index; #else # error "Unsupported ifreq structure" #endif goto out; } } catch (gu::Exception& e) { } } out: close(fd); #endif /* !__APPLE__ && !__FreeBSD__ */ if (err != 0) { gu_throw_system_error(err) << "failed to get interface index"; } else { log_debug << "returning ifindex: " << idx; } return idx; } gu::net::MReq::MReq(const Sockaddr& mcast_addr, 
const Sockaddr& if_addr) : mreq_ ( 0), mreq_len_ ( 0), ipproto_ ( 0), add_membership_opt_ (-1), drop_membership_opt_(-1), multicast_if_opt_ (-1), multicast_loop_opt_ (-1), multicast_ttl_opt_ (-1) { log_debug << mcast_addr.get_family() << " " << if_addr.get_family(); if (mcast_addr.get_family() != if_addr.get_family()) { gu_throw_fatal << "address families do not match: " << mcast_addr.get_family() << ", " << if_addr.get_family(); } if (mcast_addr.get_family() != AF_INET && mcast_addr.get_family() != AF_INET6) { gu_throw_fatal << "Mreq: address family " << mcast_addr.get_family() << " not supported"; } get_ifindex_by_addr(if_addr); mreq_len_ = (mcast_addr.get_family() == AF_INET ? sizeof(struct ip_mreq) : sizeof(struct ipv6_mreq)); if ((mreq_ = malloc(mreq_len_)) == 0) { gu_throw_fatal << "could not allocate memory"; } memset(mreq_, 0, mreq_len_); switch (mcast_addr.get_family()) { case AF_INET: { struct ip_mreq* mr(reinterpret_cast(mreq_)); mr->imr_multiaddr.s_addr = *reinterpret_cast(mcast_addr.get_addr()); mr->imr_interface.s_addr = *reinterpret_cast(if_addr.get_addr()); ipproto_ = IPPROTO_IP; add_membership_opt_ = IP_ADD_MEMBERSHIP; drop_membership_opt_ = IP_DROP_MEMBERSHIP; multicast_if_opt_ = IP_MULTICAST_IF; multicast_loop_opt_ = IP_MULTICAST_LOOP; multicast_ttl_opt_ = IP_MULTICAST_TTL; break; } case AF_INET6: { struct ipv6_mreq* mr(reinterpret_cast(mreq_)); mr->ipv6mr_multiaddr = *reinterpret_cast(mcast_addr.get_addr()); mr->ipv6mr_interface = get_ifindex_by_addr(if_addr); ipproto_ = IPPROTO_IPV6; add_membership_opt_ = IPV6_ADD_MEMBERSHIP; drop_membership_opt_ = IPV6_DROP_MEMBERSHIP; multicast_loop_opt_ = IPV6_MULTICAST_LOOP; multicast_ttl_opt_ = IPV6_MULTICAST_HOPS; break; } } } gu::net::MReq::~MReq() { free(mreq_); } const void* gu::net::MReq::get_multicast_if_value() const { switch (ipproto_) { case IPPROTO_IP: return &reinterpret_cast(mreq_)->imr_interface; case IPPROTO_IPV6: return &reinterpret_cast(mreq_)->ipv6mr_interface; default: gu_throw_fatal << 
"get_multicast_if_value() not implemented for: " << ipproto_; } } int gu::net::MReq::get_multicast_if_value_size() const { switch (ipproto_) { case IPPROTO_IP: return sizeof(reinterpret_cast(mreq_)->imr_interface); case IPPROTO_IPV6: return sizeof(reinterpret_cast(mreq_)->ipv6mr_interface); default: gu_throw_fatal << "get_multicast_if_value_size() not implemented for: " << ipproto_; } } ///////////////////////////////////////////////////////////////////////// // Addrinfo implementation ///////////////////////////////////////////////////////////////////////// gu::net::Addrinfo::Addrinfo(const addrinfo& ai) : ai_() { copy(ai, ai_); } gu::net::Addrinfo::Addrinfo(const Addrinfo& ai) : ai_() { copy(ai.ai_, ai_); } gu::net::Addrinfo::Addrinfo(const Addrinfo& ai, const Sockaddr& sa) : ai_() { if (ai.get_addrlen() != sa.get_sockaddr_len()) { gu_throw_fatal; } copy(ai.ai_, ai_); memcpy(ai_.ai_addr, &sa.get_sockaddr(), ai_.ai_addrlen); } gu::net::Addrinfo::~Addrinfo() { free(ai_.ai_addr); } std::string gu::net::Addrinfo::to_string() const { static const size_t max_addr_str_len = (6 /* tcp|udp:// */ + INET6_ADDRSTRLEN + 2 /* [] */ + 6 /* :portt */); std::string ret; ret.reserve(max_addr_str_len); Sockaddr addr(ai_.ai_addr, ai_.ai_addrlen); switch (get_socktype()) { case SOCK_STREAM: ret += "tcp://"; break; case SOCK_DGRAM: ret += "udp://"; break; default: gu_throw_error(EINVAL) << "invalid socktype: " << get_socktype(); } char dst[INET6_ADDRSTRLEN + 1]; if (inet_ntop(get_family(), addr.get_addr(), dst, sizeof(dst)) == 0) { gu_throw_system_error(errno) << "inet ntop failed"; } switch (get_family()) { case AF_INET: ret += dst; break; case AF_INET6: ret += "["; ret += dst; if (addr.is_linklocal()) { ret += "%"; ret += gu::to_string(addr.get_scope_id()); } ret += "]"; break; default: gu_throw_error(EINVAL) << "invalid address family: " << get_family(); } ret += ":" + gu::to_string(ntohs(addr.get_port())); ret.reserve(0); // free unused space if possible return ret; } 
///////////////////////////////////////////////////////////////////////// // Public methods ///////////////////////////////////////////////////////////////////////// gu::net::Addrinfo gu::net::resolve(const URI& uri) { SchemeMap::const_iterator i(scheme_map.find(uri.get_scheme())); if (i == scheme_map.end()) { gu_throw_error(EINVAL) << "invalid scheme: " << uri.get_scheme(); } try { std::string host(uri.get_host()); // remove [] if this is IPV6 address size_t pos(host.find_first_of('[')); if (pos != std::string::npos) { host.erase(pos, pos + 1); pos = host.find_first_of(']'); if (pos == std::string::npos) { gu_throw_error(EINVAL) << "invalid host: " << uri.get_host(); } host.erase(pos, pos + 1); } int err; addrinfo* ai(0); try { err = getaddrinfo(host.c_str(), uri.get_port().c_str(), SchemeMap::get_addrinfo(i), &ai); } catch (NotSet&) { err = getaddrinfo(host.c_str(), NULL, SchemeMap::get_addrinfo(i), &ai); } if (err != 0) { // Use EHOSTUNREACH as generic error number in case errno // is zero. Real error should be apparent from exception message gu_throw_error(errno == 0 ? 
EHOSTUNREACH : errno) << "getaddrinfo failed with error '" << gai_strerror(err) << "' (" << err << ") for " << uri.to_string(); } // Assume that the first entry is ok Addrinfo ret(*ai); freeaddrinfo(ai); return ret; } catch (NotFound& nf) { gu_throw_error(EINVAL) << "invalid URI: " << uri.to_string(); } } galera-4-26.4.25/galerautils/src/gu_assert.hpp000644 000164 177776 00000001276 15107057155 022332 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009 Codership Oy /** * @file Assert macro definition * * $Id$ */ #ifndef _gu_assert_hpp_ #define _gu_assert_hpp_ #ifndef DEBUG_ASSERT #include #else #include #undef assert #include "gu_logger.hpp" /** Assert that sleeps instead of aborting the program, saving it for gdb */ #define assert(expr) \ if (!(expr)) { \ log_fatal << "Assertion (" << __STRING(expr) << ") failed"; \ while(1) sleep(1); \ } #endif /* DEBUG_ASSERT */ #endif /* _gu_assert_hpp_ */ galera-4-26.4.25/galerautils/src/gu_vlq.hpp000644 000164 177776 00000011314 15107057155 021625 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2011-2013 Codership Oy // //! // @file Variable-length quantity encoding for integers // // Unsigned integers: Implementation uses using unsigned LEB128, // see for example http://en.wikipedia.org/wiki/LEB128. // // Signed integers: TODO // #ifndef GU_VLQ_HPP #define GU_VLQ_HPP #include "gu_buffer.hpp" #include "gu_throw.hpp" #include "gu_macros.h" #include #include #define GU_VLQ_CHECKS #define GU_VLQ_ALEX namespace gu { //! // @brief Retun number of bytes required to represent given value in ULEB128 // representation. // // @param value Unsigned value // // @return Number of bytes required for value representation // template inline size_t uleb128_size(UI value) { size_t i(1); value >>= 7; for (; value != 0; value >>= 7, ++i) {} return i; } //! 
// @brief Encode unsigned type to ULEB128 representation // // @param value // @param buf // @param buflen // @param offset // // @return Offset // template inline size_t uleb128_encode(UI value, byte_t* buf, size_t buflen, size_t offset) { #ifdef GU_VLQ_ALEX assert (offset < buflen); buf[offset] = value & 0x7f; while (value >>= 7) { buf[offset] |= 0x80; ++offset; #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #else assert(offset < buflen); #endif /* GU_VLQ_CHECKS */ buf[offset] = value & 0x7f; } return offset + 1; #else /* GU_VLQ_ALEX */ do { #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #else assert(offset < buflen); #endif /* GU_VLQ_CHECKS */ buf[offset] = value & 0x7f; value >>= 7; if (gu_unlikely(value != 0)) { buf[offset] |= 0x80; } ++offset; } while (value != 0); return offset; #endif /* GU_VLQ_ALEX */ } template inline size_t uleb128_encode(UI value, byte_t* buf, size_t buflen) { return uleb128_encode(value, buf, buflen, 0); } /* checks helper for the uleb128_decode() below */ extern void uleb128_decode_checks (const byte_t* buf, size_t buflen, size_t offset, size_t avail_bits); //! 
// @brief Decode unsigned type from ULEB128 representation // // @param buf // @param buflen // @param offset // @param value // // @return Offset // template inline size_t uleb128_decode(const byte_t* buf, size_t buflen, size_t offset, UI& value) { // initial check for overflow, at least one byte must be readable #ifdef GU_VLQ_CHECKS if (gu_unlikely(offset >= buflen)) gu_throw_fatal; #endif #ifdef GU_VLQ_ALEX value = buf[offset] & 0x7f; size_t shift(0); while (buf[offset] & 0x80) { ++offset; shift +=7; #ifdef GU_VLQ_CHECKS ssize_t left_bits((sizeof(UI) << 3) - shift); if (gu_unlikely(offset >= buflen || left_bits < 7)) uleb128_decode_checks (buf, buflen, offset, left_bits); #endif value |= (UI(buf[offset] & 0x7f) << shift); } return offset + 1; #else /* GU_VLQ_ALEX */ value = 0; size_t shift(0); while (true) { value |= (UI(buf[offset] & 0x7f) << shift); if (gu_likely((buf[offset] & 0x80) == 0)) { // last byte ++offset; break; } ++offset; shift += 7; #ifdef GU_VLQ_CHECKS ssize_t left_bits((sizeof(UI) << 3) - shift); if (gu_unlikely(offset >= buflen || left_bits < 7)) uleb128_decode_checks (buf, buflen, offset, left_bits); #endif } return offset; #endif /* GU_VLQ_ALEX */ } template inline size_t uleb128_decode(const byte_t* buf, size_t buflen, UI& value) { return uleb128_decode(buf, buflen, 0, value); } } #endif // GU_VLQ_HPP galera-4-26.4.25/galerautils/src/CMakeLists.txt000644 000164 177776 00000004511 15107057155 022360 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # # Logging facility is extracted into separate library to break # circular dependency between crc32c HW and galerautils libraries. add_library(galerautils_log STATIC gu_log.c) target_compile_options(galerautils_log PRIVATE -Wno-unused-parameter) # # Compile hardware optimized CRC32C code into separate library # to keep the optimized code isolated. 
# if (GALERA_CRC32C_X86_64) set(GALERAUTILS_HW_CRC32C_SOURCES gu_crc32c_x86.c) elseif (GALERA_CRC32C_ARM64) set(GALERAUTILS_HW_CRC32C_SOURCES gu_crc32c_arm64.c) endif() if (GALERAUTILS_HW_CRC32C_SOURCES) set(GALERAUTILS_HW_CRC32C_LIB galerautils_hw_crc32c) add_library(${GALERAUTILS_HW_CRC32C_LIB} STATIC ${GALERAUTILS_HW_CRC32C_SOURCES}) target_compile_options(${GALERAUTILS_HW_CRC32C_LIB} PRIVATE ${GALERA_CRC32C_COMPILER_FLAG}) target_link_libraries(${GALERAUTILS_HW_CRC32C_LIB} galerautils_log) endif() add_library(galerautils STATIC gu_abort.c gu_crc32c.c gu_dbug.c gu_fifo.c gu_lock_step.c gu_mem.c gu_mmh3.c gu_spooky.c gu_rand.c gu_threads.c gu_hexdump.c gu_to.c gu_utils.c gu_uuid.c gu_backtrace.c gu_limits.c gu_time.c gu_init.c ) # TODO: These should be eventually fixed: # - Wno-unused-parameter # # Suppress -Wself-assign which may emit unwanted warnings when # using byte swapping macros in way like k = gu_le64(k). # target_compile_options(galerautils PRIVATE -Wno-unused-parameter -Wno-declaration-after-statement -Wno-vla) target_link_libraries(galerautils galerautils_log ${GALERAUTILS_HW_CRC32C_LIB} ${GALERA_SYSTEM_LIBS}) add_library(galerautilsxx STATIC gu_vlq.cpp gu_datetime.cpp gu_gtid.cpp gu_event_service.cpp gu_exception.cpp gu_hexdump.cpp gu_serialize.cpp gu_logger.cpp gu_regex.cpp gu_string_utils.cpp gu_uri.cpp gu_buffer.cpp gu_utils++.cpp gu_config.cpp gu_fdesc.cpp gu_mmap.cpp gu_alloc.cpp gu_rset.cpp gu_resolver.cpp gu_histogram.cpp gu_signals.cpp gu_stats.cpp gu_asio.cpp gu_asio_datagram.cpp gu_asio_stream_engine.cpp gu_asio_stream_react.cpp gu_debug_sync.cpp gu_thread.cpp gu_uuid.cpp ) # TODO: Warnings should be fixed. 
target_compile_options(galerautilsxx PRIVATE -Wno-conversion -Wno-unused-parameter) target_link_libraries(galerautilsxx galerautils ${GALERA_SSL_LIBS}) galera-4-26.4.25/galerautils/src/gu_convert.hpp000644 000164 177776 00000007270 15107057155 022511 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009 Codership Oy /** * @file Routines for safe integer conversion * * $Id$ */ #ifndef _gu_convert_hpp_ #define _gu_convert_hpp_ #include "gu_macros.h" #include "gu_throw.hpp" #include namespace gu { /*! * Converts from type FROM to type TO with range checking. * Generic template is for the case sizeof(FROM) > sizeof(TO). * * @param from value to convert * @param to destination (provides type TO for template instantiation) * @return value cast to TO */ template inline TO convert (const FROM& from, const TO& to) { if (gu_unlikely(from > std::numeric_limits::max() || from < std::numeric_limits::min())) { // @todo: figure out how to print type name without RTTI gu_throw_error (ERANGE) << from << " is unrepresentable with " << (std::numeric_limits::is_signed ? 
"signed" : "unsigned") << " " << sizeof(TO) << " bytes (" << "min " << std::numeric_limits::min() << " max " << std::numeric_limits::max() << ")"; } return static_cast(from); } /* Specialized templates are for signed conversion */ template <> inline long long convert (const unsigned long long& from, const long long& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long long'"; } return static_cast(from); } template <> inline unsigned long long convert (const long long& from, const unsigned long long& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long long'"; } return static_cast(from); } template <> inline long convert (const unsigned long& from, const long& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long'"; } return static_cast(from); } template <> inline unsigned long convert (const long& from, const unsigned long& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long'"; } return static_cast(from); } template <> inline int convert (const unsigned int& from, const int& to) { if (gu_unlikely(from > static_cast (std::numeric_limits::max()))) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'long'"; } return static_cast(from); } template <> inline unsigned int convert (const int& from, const unsigned int& to) { if (gu_unlikely(from < 0)) { gu_throw_error (ERANGE) << from << " is unrepresentable with 'unsigned long'"; } return static_cast(from); } } #endif /* _gu_convert_hpp_ */ galera-4-26.4.25/galerautils/src/gu_inttypes.hpp000644 000164 177776 00000001102 15107057155 022674 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @file gu_inttypes.hpp * * A convenience header to include correct inttypes header from C++ * compilation 
units. * * Pre C++11: Define __STDC_FORMAT_MACROS required by older compilers * and include . * * C++11 and above: Include standard library header directly. */ #ifndef GU_INTTYPES_HPP #define GU_INTTYPES_HPP #if __cplusplus < 201103L #define __STDC_FORMAT_MACROS #include #else #include #endif // __cplusplus < 201103L #endif // GU_INTTYPES_HPP galera-4-26.4.25/galerautils/src/gu_debug_sync.hpp000644 000164 177776 00000002217 15107057155 023147 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2014 Codership Oy // // // Define -DGU_DBUG_ON to enable GU_DBUG macros // // Usage: // // GU_DBUG_SYNC_WAIT("sync_point_identifier") // // The macro above will block whenever "dbug=d,sync_point_identifier" // parameter has been passed to provider. // // Blocking waiters can be signalled by setting "signal=sync_point_identifier" // option. // // List of waiters can be monitored from wsrep debug_sync_waiters status // variable. // #ifndef GU_DEBUG_SYNC_HPP #define GU_DEBUG_SYNC_HPP #ifdef GU_DBUG_ON #include #include "gu_dbug.h" #define GU_DBUG_SYNC_WAIT(_c) \ GU_DBUG_EXECUTE(_c, gu_debug_sync_wait(_c);) #define GU_DBUG_SYNC_EXECUTE(_c,_cmd) \ GU_DBUG_EXECUTE(_c, _cmd); // Wait for sync signal identified by sync string void gu_debug_sync_wait(const std::string& sync); // Signal waiter identified by sync string void gu_debug_sync_signal(const std::string& sync); // Get list of active sync waiters std::string gu_debug_sync_waiters(); #else #define GU_DBUG_SYNC_WAIT(_c) #define GU_DBUG_SYNC_EXECUTE(_c,_cmd) #endif // GU_DBUG_ON #endif // GU_DEBUG_SYNC_HPP galera-4-26.4.25/galerautils/src/gu_string_utils.cpp000644 000164 177776 00000004476 15107057155 023557 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2010 Codership Oy #include "gu_string_utils.hpp" #include "gu_assert.hpp" #include using std::string; using std::vector; vector gu::strsplit(const string& s, char sep) { vector ret; size_t pos, prev_pos = 0; while ((pos = s.find_first_of(sep, prev_pos)) != 
string::npos) { ret.push_back(s.substr(prev_pos, pos - prev_pos)); prev_pos = pos + 1; } if (s.length() > prev_pos) { ret.push_back(s.substr(prev_pos, s.length() - prev_pos)); } return ret; } vector gu::tokenize(const string& s, const char sep, const char esc, const bool empty) { vector ret; size_t pos, prev_pos, search_pos; prev_pos = search_pos = 0; while ((pos = s.find_first_of(sep, search_pos)) != string::npos) { assert (pos >= prev_pos); if (esc != '\0' && pos > search_pos && esc == s[pos - 1]) { search_pos = pos + 1; continue; } if (pos > prev_pos || empty) { string t = s.substr(prev_pos, pos - prev_pos); // get rid of escapes size_t p, search_p = 0; while ((p = t.find_first_of(esc, search_p)) != string::npos && esc != '\0') { if (p > search_p) { t.erase(p, 1); search_p = p + 1; } } ret.push_back(t); } prev_pos = search_pos = pos + 1; } if (s.length() > prev_pos) { ret.push_back(s.substr(prev_pos, s.length() - prev_pos)); } else if (s.length() == prev_pos && empty) { assert(0 == prev_pos || s[prev_pos - 1] == sep); ret.push_back(""); } return ret; } void gu::trim (string& s) { const ssize_t s_length = s.length(); for (ssize_t begin = 0; begin < s_length; ++begin) { if (!isspace(s[begin])) { for (ssize_t end = s_length - 1; end >= begin; --end) { if (!isspace(s[end])) { s = s.substr(begin, end - begin + 1); return; } } assert(0); } } s.clear(); } galera-4-26.4.25/galerautils/src/gu_mem.c000644 000164 177776 00000010113 15107057155 021230 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy /** * Debugging versions of memmory functions * * $Id$ */ #include #include #include #include "gu_mem.h" #include "gu_log.h" /* Some global counters - can be inspected by gdb */ static volatile ssize_t gu_mem_total = 0; static volatile ssize_t gu_mem_allocs = 0; static volatile ssize_t gu_mem_reallocs = 0; static volatile ssize_t gu_mem_frees = 0; typedef struct mem_head { const char* file; unsigned int line; size_t used; size_t allocated; uint32_t 
signature; } mem_head_t; #define MEM_SIGNATURE 0x13578642 /**< Our special marker */ // Returns pointer to the first byte after the head structure #define TAIL(head) ((void*)((mem_head_t*)(head) + 1)) // Returns pointer to the head preceding tail #define HEAD(tail) ((mem_head_t*)(tail) - 1) void* gu_malloc_dbg (size_t size, const char* file, unsigned int line) { if (size) { size_t const total_size = size + sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) malloc (total_size); if (ret) { gu_mem_total += total_size; gu_mem_allocs++; ret->signature = MEM_SIGNATURE; ret->allocated = total_size; ret->used = size; ret->file = file; ret->line = line; // cppcheck-suppress memleak return TAIL(ret); } } return NULL; } void* gu_calloc_dbg (size_t nmemb, size_t size, const char* file, unsigned int line) { if (size != 0 && nmemb != 0) { size_t const total_size = size*nmemb + sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) calloc (total_size, 1); if (ret) { size_t const total_size = size*nmemb + sizeof(mem_head_t); gu_mem_total += total_size; gu_mem_allocs++; ret->signature = MEM_SIGNATURE; ret->allocated = total_size; ret->used = size; ret->file = file; ret->line = line; return TAIL(ret); } } return NULL; } void* gu_realloc_dbg (void* ptr, size_t size, const char* file, unsigned int line) { if (ptr) { if (size > 0) { mem_head_t* const old = HEAD(ptr); if (MEM_SIGNATURE != old->signature) { gu_error ("Attempt to realloc uninitialized pointer at " "file: %s, line: %d", file, line); assert (0); } size_t const total_size = size + sizeof(mem_head_t); mem_head_t* const ret = (mem_head_t*) realloc (old, total_size); if (ret) { gu_mem_reallocs++; gu_mem_total -= ret->allocated; // old size ret->allocated = total_size; gu_mem_total += ret->allocated; // new size ret->used = size; ret->file = file; ret->line = line; return TAIL(ret); } else { // realloc failed return NULL; } } else { gu_free_dbg (ptr, file, line); return NULL; } } else { return gu_malloc_dbg (size, 
file, line); } return NULL; } void gu_free_dbg (void* ptr, const char* file, unsigned int line) { mem_head_t* head; if (NULL == ptr) { gu_debug ("Attempt to free NULL pointer at file: %s, line: %d", file, line); return; /* As per specification - no operation is performed */ } head = HEAD(ptr); if (MEM_SIGNATURE != head->signature) { gu_error ("Attempt to free uninitialized pointer " "at file: %s, line: %d", file, line); assert (0); } if (0 == head->used) { gu_error ("Attempt to free pointer the second time at " "file: %s, line: %d. " "Was allocated at file: %s, line: %d.", file, line, head->file, head->line); assert (0); } gu_mem_total -= head->allocated; gu_mem_frees++; head->allocated = 0; head->used = 0; free (head); } void gu_mem_stats (ssize_t* total, ssize_t* allocs, ssize_t* reallocs, ssize_t* deallocs) { *total = gu_mem_total; *allocs = gu_mem_allocs; *reallocs = gu_mem_reallocs; *deallocs = gu_mem_frees; } galera-4-26.4.25/galerautils/src/gu_asio_datagram.hpp000644 000164 177776 00000003760 15107057155 023624 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @file gu_asio_datagram.hpp * * Datagram socket implementation. */ #ifndef GU_ASIO_DATAGRAM_HPP #define GU_ASIO_DATAGRAM_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. 
#endif // GU_ASIO_IMPL #include "gu_asio.hpp" #include "asio/ip/address.hpp" #include "asio/ip/udp.hpp" #include "gu_disable_non_virtual_dtor.hpp" #include "gu_compiler.hpp" namespace gu { // // UDP/Datagram wrapper // class AsioUdpSocket : public AsioDatagramSocket , public std::enable_shared_from_this { public: AsioUdpSocket(gu::AsioIoService& io_service); ~AsioUdpSocket() noexcept(false); asio::ip::udp::resolver::iterator resolve_and_open(const gu::URI& uri); virtual void open(const gu::URI& uri) GALERA_OVERRIDE; virtual void close() GALERA_OVERRIDE; virtual void connect(const gu::URI& uri) GALERA_OVERRIDE; virtual void write(const std::array& buffers) GALERA_OVERRIDE; virtual void send_to(const std::array& buffers, const AsioIpAddress& target_host, unsigned short target_port) GALERA_OVERRIDE; virtual void async_read( const AsioMutableBuffer& buffer, const std::shared_ptr& handler) GALERA_OVERRIDE; virtual std::string local_addr() const GALERA_OVERRIDE; // Async handlers void read_handler( const std::shared_ptr& handler, const asio::error_code& ec, size_t bytes_transferred); private: AsioIoService& io_service_; asio::ip::udp::socket socket_; asio::ip::udp::endpoint local_endpoint_; asio::ip::address local_if_; }; } #include "gu_enable_non_virtual_dtor.hpp" #endif // GU_ASIO_DATAGRAM_HPP galera-4-26.4.25/galerautils/src/gu_progress.hpp000644 000164 177776 00000011127 15107057155 022671 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2016-2021 Codership Oy */ #ifndef __GU_PROGRESS__ #define __GU_PROGRESS__ #include "gu_logger.hpp" #include "gu_datetime.hpp" #include #include #include namespace gu { template class Progress { public: class Callback { public: /** * @param total amount of work * @param done amount ot work */ virtual void operator ()(T total, T done) = 0; virtual ~Callback() {} }; private: Callback* const callback_; std::string const prefix_; std::string const units_; gu::datetime::Period const log_interval_; T const unit_interval_; T total_; T 
current_; T last_check_; T last_logged_; gu::datetime::Date last_log_time_; gu::datetime::Date last_cb_time_; unsigned char const total_digits_; void log(gu::datetime::Date const now) { log_info << prefix_ << "... " << std::fixed << std::setprecision(1) << (double(current_)/total_ * 100) << "% (" << current_ << '/' << total_ << units_ << ") complete."; last_log_time_ = now; last_logged_ = current_; } static std::string const DEFAULT_INTERVAL; // see definition below void cb(gu::datetime::Date const now) { (*callback_)(total_, current_); last_cb_time_ = now; } public: /* * Creates progress context and logs the beginning of the progress (0%) * * @param c a callback to call to report progress * @param p prefix to be printed in each log message * (include trailing space) * @param u units to be printed next to numbers(empty string - no units) * (include space between number and units) * @param t total amount of work in units * @param ui minimal unit interval to log progress * @param ti minimal time interval to log progress */ Progress(Callback* c, const std::string& p, const std::string& u, T const t, T const ui, const std::string& ti = DEFAULT_INTERVAL) : callback_ (c), prefix_ (p), units_ (u), log_interval_ (ti), unit_interval_(ui), total_ (t), current_ (0), last_check_ (current_), last_logged_ (), last_log_time_(), last_cb_time_ (), total_digits_ (::ceil(::log10(total_ + 1))) { gu::datetime::Date const now(gu::datetime::Date::monotonic()); if (callback_) cb(now); log(now); } /* On destruction log whatever progress was reached. */ ~Progress() { gu::datetime::Date const now(gu::datetime::Date::monotonic()); if (callback_) cb(now); if (last_logged_ != current_)log(now); } /* Increments progress by @increment. 
* If time limit is reached, logs the total progress */ void update(T const increment) { /* while we may want to limit the rate of logging the progress, * it still makes sense (for monitoring) to call the callback * much more frequently */ static gu::datetime::Period const cb_interval("PT0.5S"); current_ += increment; if (current_ - last_check_ >= unit_interval_) { gu::datetime::Date const now(gu::datetime::Date::monotonic()); if (callback_ && now - last_cb_time_ >= cb_interval) cb(now); if (now - last_log_time_ >= log_interval_) log(now); last_check_ = current_; /* last_*_time_ is updated in log() and cb() */ } } void update_total(T const increment) { total_ += increment; } /* mark progress as finished before object destruction */ void finish() { current_ = total_; } private: Progress(const Progress&); Progress& operator=(Progress); }; /* class Progress */ template std::string const Progress::DEFAULT_INTERVAL = "PT10S"; /* 10 sec */ } /* namespace gu */ #endif /* __GU_PROGRESS__ */ galera-4-26.4.25/galerautils/src/gu_uuid.c000644 000164 177776 00000012063 15107057155 021426 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2017 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. 
* */ #include "gu_uuid.h" #include "gu_byteswap.h" #include "gu_log.h" #include "gu_assert.h" #include "gu_threads.h" #include "gu_time.h" #include "gu_rand.h" #include // for rand_r() #include // for memcmp() #include // for fopen() et al #include // for gettimeofday() #include // for getpid() #include // for errno #include #define UUID_NODE_LEN 6 /** Returns 64-bit system time in 100 nanoseconds */ static uint64_t uuid_get_time () { static long long check = 0; static gu_mutex_t mtx = GU_MUTEX_INITIALIZER; long long t; gu_mutex_lock (&mtx); do { t = gu_time_calendar() / 100; } while (check == t); check = t; gu_mutex_unlock (&mtx); return (t + 0x01B21DD213814000LL); // offset since the start of 15 October 1582 } #ifndef UUID_URAND // This function can't be called too often, // apparently due to lack of entropy in the pool. /** Fills node part of the uuid with true random data from /dev/urand */ static int uuid_urand_node (uint8_t* node, size_t node_len) { static const char urand_name[] = "/dev/urandom"; FILE* urand; size_t i = 0; int c; urand = fopen (urand_name, "r"); if (NULL == urand) { gu_debug ("Failed to open %s for reading (%d).", urand_name, -errno); return -errno; } while (i < node_len && (c = fgetc (urand)) != EOF) { node[i] = (uint8_t) c; i++; } fclose (urand); return 0; } #else #define uuid_urand_node(a,b) true #endif /** Fills node part with pseudorandom data from rand_r() */ static void uuid_rand_node (uint8_t* node, size_t node_len) { unsigned int seed = gu_rand_seed_int (gu_time_calendar(), node, getpid()); size_t i; for (i = 0; i < node_len; i++) { uint32_t r = (uint32_t) rand_r (&seed); /* combine all bytes into the lowest byte */ node[i] = (uint8_t)((r) ^ (r >> 8) ^ (r >> 16) ^ (r >> 24)); } } static inline void uuid_fill_node (uint8_t* node, size_t node_len) { if (uuid_urand_node (node, node_len)) { uuid_rand_node (node, node_len); } } void gu_uuid_generate (gu_uuid_t* uuid, const void* node, size_t node_len) { GU_ASSERT_ALIGNMENT(*uuid); assert 
(NULL != uuid); assert (NULL == node || 0 != node_len); uint32_t* uuid32 = (uint32_t*) uuid->data; uint16_t* uuid16 = (uint16_t*) uuid->data; uint64_t uuid_time = uuid_get_time (); uint16_t clock_seq = gu_rand_seed_int (uuid_time, &GU_UUID_NIL, getpid()); /* time_low */ uuid32[0] = gu_be32 (uuid_time & 0xFFFFFFFF); /* time_mid */ uuid16[2] = gu_be16 ((uuid_time >> 32) & 0xFFFF); /* time_high_and_version */ uuid16[3] = gu_be16 (((uuid_time >> 48) & 0x0FFF) | (1 << 12)); /* clock_seq_and_reserved */ uuid16[4] = gu_be16 ((clock_seq & 0x3FFF) | 0x8000); /* node */ if (NULL != node && 0 != node_len) { memcpy (&uuid->data[10], node, node_len > UUID_NODE_LEN ? UUID_NODE_LEN : node_len); } else { uuid_fill_node (&uuid->data[10], UUID_NODE_LEN); uuid->data[10] |= 0x02; /* mark as "locally administered" */ } return; } /** * Compare two UUIDs * @return -1, 0, 1 if left is respectively less, equal or greater than right */ int gu_uuid_compare (const gu_uuid_t* left, const gu_uuid_t* right) { GU_ASSERT_ALIGNMENT(*left); GU_ASSERT_ALIGNMENT(*right); return memcmp (left, right, sizeof(gu_uuid_t)); } static uint64_t uuid_time (const gu_uuid_t* uuid) { uint64_t uuid_time; union { uint16_t u16[4]; uint32_t u32[2]; } tmp; memcpy(&tmp, uuid, sizeof(tmp)); /* time_high_and_version */ uuid_time = gu_be16(tmp.u16[3]) & 0x0FFF; /* time_mid */ uuid_time = (uuid_time << 16) + gu_be16(tmp.u16[2]); /* time_low */ uuid_time = (uuid_time << 32) + gu_be32(tmp.u32[0]); return uuid_time; } /** * Compare ages of two UUIDs * @return -1, 0, 1 if left is respectively younger, equal or older than right */ int gu_uuid_older (const gu_uuid_t* left, const gu_uuid_t* right) { GU_ASSERT_ALIGNMENT(*left); GU_ASSERT_ALIGNMENT(*right); uint64_t time_left = uuid_time (left); uint64_t time_right = uuid_time (right); if (time_left < time_right) return 1; if (time_left > time_right) return -1; return 0; } ssize_t gu_uuid_print(const gu_uuid_t* uuid, char* buf, size_t buflen) { GU_ASSERT_ALIGNMENT(*uuid); if (buflen 
< GU_UUID_STR_LEN) return -1; return sprintf(buf, GU_UUID_FORMAT, GU_UUID_ARGS(uuid)); } ssize_t gu_uuid_scan(const char* buf, size_t buflen, gu_uuid_t* uuid) { GU_ASSERT_ALIGNMENT(*uuid); ssize_t ret; if (buflen < GU_UUID_STR_LEN) return -1; ret = sscanf(buf, GU_UUID_FORMAT_SCANF, GU_UUID_ARGS_SCANF(uuid)); if (ret != sizeof(uuid->data)) return -1; return ret; } galera-4-26.4.25/galerautils/src/gu_throw.hpp000644 000164 177776 00000006770 15107057155 022200 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy * * $Id$ */ /*! * @file Classes to allow throwing more verbose exceptions. Should be only * used from one-line macros below. Concrete classes intended to be final. */ #ifndef __GU_THROW__ #define __GU_THROW__ #include #include #include #include #include "gu_macros.hpp" #include "gu_exception.hpp" namespace gu { /*! "base" class */ class ThrowBase { protected: const char* const file; const char* const func; int const line; std::ostringstream os; ThrowBase (const char* file_, const char* func_, int line_) : file (file_), func (func_), line (line_), os () {} private: ThrowBase (const ThrowBase&); ThrowBase& operator= (const ThrowBase&); friend class ThrowError; friend class ThrowSystemError; friend class ThrowFatal; }; /* final */ class ThrowError { public: ThrowError (const char* file_, const char* func_, int line_, int err_) : base (file_, func_, line_), err (err_) {} ~ThrowError() GU_NOEXCEPT(false) GU_NORETURN { Exception e(base.os.str(), err); e.trace (base.file, base.func, base.line); // cppcheck-suppress exceptThrowInDestructor throw e; } std::ostringstream& msg () { return base.os; } private: ThrowBase base; int const err; }; /* final */ class ThrowSystemError { public: ThrowSystemError (const char* file_, const char* func_, int line_, int err_) : base (file_, func_, line_), err (err_) {} ~ThrowSystemError() GU_NOEXCEPT(false) GU_NORETURN { base.os << ": System error: " << err << " (" << ::strerror(err) << ')'; Exception 
e(base.os.str(), err); e.trace (base.file, base.func, base.line); // cppcheck-suppress exceptThrowInDestructor throw e; } std::ostringstream& msg () { return base.os; } private: ThrowBase base; int const err; }; /* final */ class ThrowFatal { public: ThrowFatal (const char* file, const char* func, int line) : base (file, func, line) {} ~ThrowFatal () GU_NOEXCEPT(false) GU_NORETURN { base.os << " (FATAL)"; Exception e(base.os.str(), ENOTRECOVERABLE); e.trace (base.file, base.func, base.line); // cppcheck-suppress exceptThrowInDestructor throw e; } std::ostringstream& msg () { return base.os; } private: ThrowBase base; }; } // Usage: gu_throw_xxxxx << msg1 << msg2 << msg3; #define gu_throw_error(err_) \ gu::ThrowError(__FILE__, __FUNCTION__, __LINE__, err_).msg() #define gu_throw_system_error(err_) \ gu::ThrowSystemError(__FILE__, __FUNCTION__, __LINE__, err_).msg() #define gu_throw_fatal \ gu::ThrowFatal(__FILE__, __FUNCTION__, __LINE__).msg() #endif // __GU_THROW__ galera-4-26.4.25/galerautils/src/gu_asio_stream_engine.cpp000644 000164 177776 00000053315 15107057155 024660 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // #define GU_ASIO_IMPL #include "gu_asio_stream_engine.hpp" #include "gu_asio_io_service_impl.hpp" #include "gu_asio_debug.hpp" #include "gu_asio_error_category.hpp" #include "gu_throw.hpp" #include "gu_compiler.hpp" #include "gu_datetime.hpp" #include #include #include // Raw TCP stream engine. class AsioTcpStreamEngine : public gu::AsioStreamEngine { public: AsioTcpStreamEngine(int fd) : fd_(fd) , last_error_() { } virtual std::string scheme() const GALERA_OVERRIDE { return gu::scheme::tcp; } virtual enum op_status client_handshake() GALERA_OVERRIDE { return success; } virtual enum op_status server_handshake() GALERA_OVERRIDE { return success; } virtual void shutdown() GALERA_OVERRIDE { /* Note that we shut down the socket only for writes. 
This * is to keep the socket alive for reads until the peer closes * the connection. */ ::shutdown(fd_, SHUT_WR); } virtual op_result read(void* buf, size_t max_count) GALERA_OVERRIDE { clear_error(); ssize_t bytes_read(::read(fd_, buf, max_count)); if (bytes_read > 0) { return op_result{success, static_cast(bytes_read)}; } else if (bytes_read == 0) { return op_result{eof, 0}; } else if (errno == EAGAIN || errno == EWOULDBLOCK) { return op_result{want_read, 0}; } else { last_error_ = errno; return op_result{error, 0}; } } virtual op_result write(const void* buf, size_t count) GALERA_OVERRIDE { clear_error(); ssize_t bytes_written(::send(fd_, buf, count, MSG_NOSIGNAL)); if (bytes_written > 0) { return op_result{success, static_cast(bytes_written) }; } else if (errno == EAGAIN || errno == EWOULDBLOCK) { return op_result{want_write, 0}; } else { last_error_ = errno; return op_result{error, 0}; } } virtual gu::AsioErrorCode last_error() const GALERA_OVERRIDE { return gu::AsioErrorCode(last_error_, gu_asio_system_category); } private: void clear_error() { last_error_ = 0; } int fd_; int last_error_; }; #ifdef GALERA_HAVE_SSL #include #if OPENSSL_VERSION_NUMBER >= 0x1010100fL #define HAVE_READ_EX #define HAVE_WRITE_EX #endif class AsioSslStreamEngine : public gu::AsioStreamEngine { public: AsioSslStreamEngine(gu::AsioIoService& io_service, int fd) : fd_(fd) , ssl_(::SSL_new(io_service.impl().ssl_context_->native_handle())) , last_error_() , last_verify_error_() , last_error_category_() { ::SSL_set_fd(ssl_, fd_); } ~AsioSslStreamEngine() { ::SSL_free(ssl_); } AsioSslStreamEngine(const AsioSslStreamEngine&) = delete; AsioSslStreamEngine& operator=(const AsioSslStreamEngine&) = delete; virtual std::string scheme() const GALERA_OVERRIDE { return gu::scheme::ssl; } virtual enum op_status client_handshake() GALERA_OVERRIDE { clear_error(); auto result(SSL_connect(ssl_)); auto ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " 
AsioSslStreamEngine::client_handshake: " << result << " ssl error " << ssl_error << " sys error " << sys_error); return map_status(ssl_error, sys_error, "client_handshake"); } virtual enum op_status server_handshake() GALERA_OVERRIDE { clear_error(); auto result(SSL_accept(ssl_)); auto ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::server_handshake: " << result << " ssl error " << ssl_error << " sys error " << sys_error); return map_status(ssl_error, sys_error, "server_handshake"); } virtual void shutdown() GALERA_OVERRIDE { clear_error(); auto result(SSL_shutdown(ssl_)); auto ssl_error __attribute__((unused)) (::SSL_get_error(ssl_, result)); auto sys_error __attribute__((unused)) (::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::shutdown: " << result << " ssl error " << ssl_error << " sys error " << sys_error); } virtual op_result read(void* buf, size_t max_count) GALERA_OVERRIDE { clear_error(); return do_read(buf, max_count); } virtual op_result write(const void* buf, size_t count) GALERA_OVERRIDE { clear_error(); return do_write(buf, count); } virtual gu::AsioErrorCode last_error() const GALERA_OVERRIDE { return gu::AsioErrorCode(last_error_, last_error_category_ ? *last_error_category_ : gu_asio_system_category, last_verify_error_); } private: void clear_error() { last_error_ = 0; last_verify_error_ = 0; last_error_category_ = 0; } #ifdef HAVE_READ_EX // Read method with SSL_read_ex which was introduced in 1.1.1. 
op_result do_read(void* buf, size_t max_count) { size_t bytes_transferred(0); auto result(SSL_read_ex(ssl_, buf, max_count, &bytes_transferred)); auto ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::read: " << result << " ssl error " << ssl_error << " sys error " << sys_error << " bytes transferred " << bytes_transferred); return op_result{map_status(ssl_error, sys_error, "read"), bytes_transferred}; } #else // Read method for OpenSSL versions pre 1.1.1. op_result do_read(void* buf, size_t max_count) { size_t bytes_transferred(0); auto result(SSL_read(ssl_, buf, max_count)); auto ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::read: " << result << " ssl error " << ssl_error << " sys error " << sys_error << " bytes transferred " << bytes_transferred); if (ssl_error == SSL_ERROR_WANT_READ && (bytes_transferred = SSL_pending(ssl_)) > 0) { result = SSL_read(ssl_, buf, bytes_transferred); assert(static_cast(result) == bytes_transferred); return op_result{map_status(ssl_error, sys_error, "read"), bytes_transferred}; } else if (result > 0) { bytes_transferred = result; } return op_result{map_status(ssl_error, sys_error, "read"), bytes_transferred}; } #endif // HAVE_READ_EX #ifdef HAVE_WRITE_EX op_result do_write(const void* buf, size_t count) { size_t bytes_transferred(0); auto result(SSL_write_ex(ssl_, buf, count, &bytes_transferred)); auto ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::write: " << result << " ssl error " << ssl_error << " sys error " << sys_error << " bytes transferred " << bytes_transferred); return op_result{map_status(ssl_error, sys_error, "write"), bytes_transferred}; } #else op_result do_write(const void* buf, size_t count) { size_t bytes_transferred(0); auto result(SSL_write(ssl_, buf, count)); auto 
ssl_error(::SSL_get_error(ssl_, result)); auto sys_error(::ERR_get_error()); GU_ASIO_DEBUG(this << " AsioSslStreamEngine::write: " << result << " ssl error " << ssl_error << " sys error " << sys_error << " bytes transferred " << bytes_transferred); if (result > 0) { bytes_transferred = result; } return op_result{map_status(ssl_error, sys_error, "write"), bytes_transferred}; } #endif // HAVE_WRITE_EX enum op_status map_status(int ssl_error, int sys_error, const char* op) { switch (ssl_error) { case SSL_ERROR_NONE: return success; case SSL_ERROR_WANT_WRITE: return want_write; case SSL_ERROR_WANT_READ: return want_read; case SSL_ERROR_SYSCALL: last_error_ = sys_error; return (sys_error == 0 ? eof : error); case SSL_ERROR_SSL: { last_error_ = sys_error; last_error_category_ = &gu_asio_ssl_category; last_verify_error_ = SSL_get_verify_result(ssl_); return error; } case SSL_ERROR_ZERO_RETURN: { last_error_ = 0; last_error_category_ = &gu_asio_ssl_category; last_verify_error_ = SSL_get_verify_result(ssl_); return eof; } } log_warn << "Unhandled SSL error " << ssl_error; assert(0); last_error_ = sys_error; last_error_category_ = &gu_asio_ssl_category; return error; } int fd_; SSL* ssl_; int last_error_; int last_verify_error_; const gu::AsioErrorCategory* last_error_category_; }; /* * DynamicStreamEngine is used to choose either TCP or SSL for socket communication. * Following condition should be true: Ts(server timeout) > Tc(client timeout). * * Following diagrams show combinations possible with TCP/SSL/Dynamic stream engine. * * 1. CLIENT - dynamic, SERVER - standard * * C(d) S(s/TCP) * |------| <--TCP-- | * | Tc | | * |----->| | * | | * * 2. CLIENT - dynamic (with SSL), SERVER - standard (with SSL) * * C(d) S(s/TLS) * |------| | * | Tc | | * |----->| | * | --SSL--> | * |------| <--SSL-- | * | Tc | | * (A) * |----->| | * * (A) Packet is received on second client timeout period, it should be SSL response packet * * 3. 
CLIENT - standard, SERVER - dynamic (with SSL) * * C(s/TLS) S(d) * | --SSL--> |------| * | | | * | | Ts | * | | | * | |<-----| * | | * * 4. CLIENT - standard, SERVER - dynamic * * C(s/TCP) S(d) * | |------| * | | | * | | Ts | * | | | * | |<-----| * | |------| * | | | * | | Ts | * | | | * | |<-----| * | <--TCP-- | * | | * * 5. CLIENT - dynamic (with SSL), SERVER - dynamic (with SSL) * * C(d) S(d) * |------| |------| * | Tc | | | * |----->| | Ts | * | --SSL--> | | * (A) * |------| <--SSL-- |<-----| * (B) * | Tc | | * |----->| | * * (A) Packet is received on first server timeout, it should be SSL request packet, we support SSL so we'll send SSL response * (B) Packet is received on second client timeout, it should be SSL response packet * * 6. CLIENT - dynamic (with SSL), SERVER - dynamic (without SSL) * * C(d) S(d) * |------| |------| * | Tc | | | * |----->| | Ts | * | --SSL--> | | * (A) * |------| |<-----| * | Tc | |<-----| * |----->| | | * | | Ts | * | | | * | |<-----| * | <--TCP-- | * | --TCP--> | * * (A) Packet is received on first timeout, it should be SSL request packet, but we don't * support SSL so we should timeout * (B) Nothing is received on second timeout period, so it should be TCP packet * * 7. 
CLIENT - dynamic (without SSL), SERVER - dynamic (with/without SSL) * * C(d) S(d) * |------| |------| * | Tc | | | * |----->| | Ts | * | | | * | |<-----| * | |<-----| * | | | * | | Ts | * | | | * | |<-----| * | <--TCP-- | * (A) * | --TCP--> | * * (A) Packet is on client received after timeout period, SSL CLIENT HELLO was not * sent, so it should be TCP packet * */ class AsioDynamicStreamEngine : public gu::AsioStreamEngine { public: AsioDynamicStreamEngine(gu::AsioIoService& io_service, int fd, bool non_blocking, bool encrypted_protocol) : client_timeout_(500 * gu::datetime::MSec) , server_timeout_(750 * gu::datetime::MSec) , fd_(fd) , io_service_(io_service) , engine_(std::make_shared(fd_)) , non_blocking_(non_blocking) , have_encrypted_protocol_(encrypted_protocol) , timer_check_done_(false) , client_encrypted_message_sent_(false) , client_encrypted_message_sent_ts_(gu::datetime::Date::zero()) { } ~AsioDynamicStreamEngine() { } AsioDynamicStreamEngine(const AsioDynamicStreamEngine&) = delete; AsioDynamicStreamEngine& operator=(const AsioDynamicStreamEngine&) = delete; virtual std::string scheme() const GALERA_OVERRIDE { return engine_->scheme(); } virtual enum op_status client_handshake() GALERA_OVERRIDE { if (not timer_check_done_) { if (not client_encrypted_message_sent_) { bool received = socket_poll(client_timeout_.get_nsecs() / gu::datetime::MSec); if (have_encrypted_protocol_ && not received) { engine_.reset(); engine_ = std::make_shared(io_service_, fd_); client_encrypted_message_sent_ = true; client_encrypted_message_sent_ts_ = gu::datetime::Date::monotonic(); if (not non_blocking_) { fcntl(fd_, F_SETFL, fcntl(fd_, F_GETFL, 0) | O_NONBLOCK); } op_status result = success; bool tcp_engine_switch = false; while(true) { result = engine_->client_handshake() ; if (non_blocking_) { return result; } if (result == AsioStreamEngine::success || result == AsioStreamEngine::error) { break; } received = socket_poll(client_timeout_.get_nsecs() / gu::datetime::MSec); if 
(not received) { engine_.reset(); engine_ = std::make_shared(fd_); tcp_engine_switch = true; break; } } if (not non_blocking_) { fcntl(fd_, F_SETFL, fcntl(fd_, F_GETFL, 0) ^ O_NONBLOCK); if (not tcp_engine_switch) { return result; } } } } else { gu::datetime::Date now(gu::datetime::Date::monotonic()); if (client_encrypted_message_sent_ts_ + client_timeout_ < now) { engine_.reset(); engine_ = std::make_shared(fd_); } } timer_check_done_ = true; } return engine_->client_handshake(); } virtual enum op_status server_handshake() GALERA_OVERRIDE { if (not timer_check_done_) { bool received = socket_poll(server_timeout_.get_nsecs() / gu::datetime::MSec); int bytes_available; ioctl(fd_, FIONREAD, &bytes_available); if (have_encrypted_protocol_ && received && bytes_available) { engine_.reset(); engine_ = std::make_shared(io_service_, fd_); timer_check_done_ = true; return engine_->server_handshake(); } else if (not have_encrypted_protocol_) { if (received && bytes_available) { std::vector pending_data(bytes_available); engine_->read(pending_data.data(), bytes_available); } socket_poll(server_timeout_.get_nsecs() / gu::datetime::MSec); } timer_check_done_ = true; } return engine_->server_handshake(); } virtual void shutdown() GALERA_OVERRIDE { engine_->shutdown(); timer_check_done_ = false; client_encrypted_message_sent_ = false; engine_ = std::make_shared(fd_); } virtual op_result read(void* buf, size_t max_count) GALERA_OVERRIDE { return engine_->read(buf, max_count); } virtual op_result write(const void* buf, size_t count) GALERA_OVERRIDE { return engine_->write(buf, count); } virtual gu::AsioErrorCode last_error() const GALERA_OVERRIDE { return engine_->last_error(); } private: bool socket_poll(long msec) { struct pollfd pfd; pfd.fd = fd_; pfd.events = POLLIN; switch (poll(&pfd, 1, msec)) { // Timeout case 0: { return false; } // Error case -1: { return false; } // Data available default: { if (pfd.revents & POLLIN) { return true; } else { return false; } } } } 
gu::datetime::Period client_timeout_; gu::datetime::Period server_timeout_; int fd_; gu::AsioIoService& io_service_; std::shared_ptr engine_; bool non_blocking_; bool have_encrypted_protocol_; bool timer_check_done_; bool client_encrypted_message_sent_; gu::datetime::Date client_encrypted_message_sent_ts_; }; #endif // GALERA_HAVE_SSL std::shared_ptr gu::AsioStreamEngine::make( AsioIoService& io_service, const std::string& scheme, int fd, bool non_blocking) { if (scheme == "tcp") { #ifdef GALERA_HAVE_SSL if (not io_service.dynamic_socket_enabled()) #else // GALERA_HAVE_SSL if (io_service.dynamic_socket_enabled()) { GU_ASIO_DEBUG("Dynamic socket enabled without SSL compiled, using TCP engine") } #endif { GU_ASIO_DEBUG("AsioStreamEngine::make use TCP engine"); return std::make_shared(fd); } #ifdef GALERA_HAVE_SSL else { GU_ASIO_DEBUG("AsioStreamEngine::make use Dynamic engine") return std::make_shared(io_service, fd, non_blocking, io_service.ssl_enabled()); } #endif // GALERA_HAVE_SSL } #ifdef GALERA_HAVE_SSL else if (scheme == "ssl") { if (not io_service.dynamic_socket_enabled()) { GU_ASIO_DEBUG("AsioStreamEngine::make use SSL engine"); return std::make_shared(io_service, fd); } else { GU_ASIO_DEBUG("AsioStreamEngine::make use Dynamic engine"); return std::make_shared(io_service, fd, non_blocking, io_service.ssl_enabled()); } } #endif // GALERA_HAVE_SSL else { gu_throw_error(EINVAL) << "Stream engine not implemented for scheme " << scheme; return std::shared_ptr(); } } std::ostream& gu::operator<<(std::ostream& os, enum AsioStreamEngine::op_status status) { switch (status) { case AsioStreamEngine::success: os << "success"; break; case AsioStreamEngine::want_read: os << "want_read"; break; case AsioStreamEngine::want_write: os << "want_write"; break; case AsioStreamEngine::eof: os << "eof"; break; case AsioStreamEngine::error: os << "error"; break; default: os << "unknown(" << static_cast(status) << ")"; break; } return os; } 
galera-4-26.4.25/galerautils/src/gu_datetime.cpp000644 000164 177776 00000014754 15107057155 022625 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * $Id$ */ #include "gu_datetime.hpp" #include "gu_logger.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include "gu_regex.hpp" #include namespace { /* * Parser for real numbers without loss of precision. Returns long long. */ /* Regular expression for reals. Allowed formats: * 1 * 1.1 * .1 */ const char* real_regex_str = "^([0-9]*)?\\.?([0-9]*)?$"; enum RealParts { integer = 1, decimal = 2 }; constexpr size_t num_real_parts = 3; gu::RegEx real_regex(real_regex_str); /* Helper to compute powers of 10 without floating point arithmetic. * The exponents must be integer in range [0, 9). */ long long pow_10(int exponent) { if (exponent < 0 || exponent >= 9) { throw gu::NotFound(); } long long result = 1; while (exponent != 0) { result *= 10; --exponent; } return result; } /* Real number representation with integer and decimal parts separated. Decimal part is represented in nanounits. */ struct Real { long long integer{0}; // Integer part long long decimal{0}; // Decimal part in nanounits }; /* Parse real number frrom string. */ Real real_from_string(const std::string& str) try { Real ret; std::vector parts( real_regex.match(str, num_real_parts)); if (parts.size() != 3) { throw gu::NotFound(); } if (parts[RealParts::integer].is_set()) { const auto& str = parts[RealParts::integer].str(); if (str.size()) { ret.integer = std::stoll(str); } } if (parts[RealParts::decimal].is_set()) { const auto& str = parts[RealParts::decimal].str(); if (str.size()) { const size_t n_decis = str.size(); if (n_decis > 9) { throw gu::NotFound(); } const int exponent = 9 - n_decis; const long long multiplier = pow_10(exponent); ret.decimal = std::stoll(str) * multiplier; } } return ret; } catch (...) { throw gu::NotFound(); } /* Parse seconds from string, return long long. 
*/ long long seconds_from_string(const std::string& str) { auto real = real_from_string(str); const long long max = std::numeric_limits::max(); if (max/gu::datetime::Sec < real.integer) { /* Multiplication would overflow */ throw gu::NotFound(); } if (real.integer * gu::datetime::Sec > max - real.decimal) { /* Addition would overflow */ throw gu::NotFound(); } return real.integer * gu::datetime::Sec + real.decimal; } /* Parse seconds from string with multiplier. It is assumed that the * str argument contains integer. */ template long long seconds_from_string_mult(const std::string& str) try { const long long val = std::stoll(str); const long long max = std::numeric_limits::max(); if (max/Mult < val) { /* Multiplication would overflow */ throw gu::NotFound(); } return (val * Mult); } catch(...) { throw gu::NotFound(); } const char* const period_regex = "^(P)(([0-9]+)Y)?(([0-9]+)M)?(([0-9]+)D)?" /* 1 23 45 67 */ "((T)?(([0-9]+)H)?(([0-9]+)M)?(([0-9]+(\\.?[0-9]*))?S)?)?$"; /* 89 11 13 15 */ gu::RegEx regex(period_regex); enum { GU_P = 1, GU_YEAR = 3, GU_MONTH = 5, GU_DAY = 7, GU_HOUR = 10, GU_MIN = 12, GU_SEC = 15, GU_NUM_PARTS = 17 }; struct regex_group { int index; std::function parse; }; const struct regex_group regex_groups[] { { GU_YEAR, seconds_from_string_mult }, { GU_MONTH, seconds_from_string_mult }, { GU_DAY, seconds_from_string_mult }, { GU_HOUR, seconds_from_string_mult }, { GU_MIN, seconds_from_string_mult }, { GU_SEC, seconds_from_string }, }; long long iso8601_duration_to_nsecs(const std::string& str) { long long nsecs = 0; std::vector parts; try { parts = regex.match(str, GU_NUM_PARTS); } catch (...) 
{ throw gu::NotFound(); } for (auto g : regex_groups) { if (parts[g.index].is_set()) { const long long val(g.parse(parts[g.index].str())); const long long max(std::numeric_limits::max()); if (nsecs > max - val) { // addition would overflow throw gu::NotFound(); } nsecs += val; } } return nsecs; } } long long gu::datetime::SimClock::counter_(0); bool gu::datetime::SimClock::initialized_(false); std::ostream& gu::datetime::operator<<(std::ostream& os, const Date& d) { os << d.get_utc(); return os; } std::ostream& gu::datetime::operator<<(std::ostream& os, const Period& p) { os << "P"; int64_t nsecs(p.get_nsecs()); if (nsecs/Year > 0) { os << (nsecs/Year) << "Y"; nsecs %= Year; } if (nsecs/Month > 0) { os << (nsecs/Month) << "M"; nsecs %= Month; } if (nsecs/Day > 0) { os << (nsecs/Day) << "D"; nsecs %= Day; } if (nsecs > 0) { os << "T"; } if (nsecs/Hour > 0) { os << (nsecs/Hour) << "H"; nsecs %= Hour; } if (nsecs/Min > 0) { os << (nsecs/Min) << "M"; nsecs %= Min; } if (double(nsecs)/Sec >= 1.e-9) { os << (double(nsecs)/Sec) << "S"; } return os; } void gu::datetime::Date::parse(const std::string& str) { if (str == "") { return; } gu_throw_fatal << "not implemented"; } void gu::datetime::Period::parse(const std::string& str) { try { nsecs = ::iso8601_duration_to_nsecs(str); } catch (...) 
{ nsecs = seconds_from_string(str); } } galera-4-26.4.25/galerautils/src/gu_exception.cpp000644 000164 177776 00000000737 15107057155 023023 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2015 Codership Oy * */ #include #include "gu_utils.hpp" #include "gu_exception.hpp" namespace gu { void Exception::trace (const char* file, const char* func, int line) const { msg_.reserve (msg_.length() + ::strlen(file) + ::strlen(func) + 15); msg_ += "\n\t at "; msg_ += file; msg_ += ':'; msg_ += func; msg_ += "():"; msg_ += to_string(line); } } galera-4-26.4.25/galerautils/src/gu_asio_ip_address_impl.hpp000644 000164 177776 00000003447 15107057155 025204 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @file gu_asio_ip_address_impl.hpp * * IP address implementation wrappers. */ #ifndef GU_ASIO_IP_ADDRESS_IMPL_HPP #define GU_ASIO_IP_ADDRESS_IMPL_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL #include "gu_asio.hpp" #include "asio/ip/address.hpp" namespace gu { class AsioIpAddressV4::Impl { public: Impl() : impl_() { } asio::ip::address_v4& native() { return impl_; } const asio::ip::address_v4& native() const { return impl_; } private: asio::ip::address_v4 impl_; }; class AsioIpAddressV6::Impl { public: Impl() : impl_() { } asio::ip::address_v6& native() { return impl_; } const asio::ip::address_v6& native() const { return impl_; } private: asio::ip::address_v6 impl_; }; class gu::AsioIpAddress::Impl { public: Impl() : impl_() {} asio::ip::address& native() { return impl_; } const asio::ip::address& native() const { return impl_; } private: asio::ip::address impl_; }; } static inline std::string escape_addr(const asio::ip::address& addr) { if (gu_likely(addr.is_v4() == true)) { return addr.to_v4().to_string(); } else { return "[" + addr.to_v6().to_string() + "]"; } } static inline asio::ip::address make_address(const std::string& addr) { return 
asio::ip::address::from_string(gu::unescape_addr(addr)); } static inline std::string any_addr(const asio::ip::address& addr) { if (gu_likely(addr.is_v4() == true)) { return addr.to_v4().any().to_string(); } else { return addr.to_v6().any().to_string(); } } #endif // GU_ASIO_IP_ADDRESS_IMPL_HPP galera-4-26.4.25/galerautils/src/gu_asio_debug.hpp000644 000164 177776 00000000640 15107057155 023124 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // #ifndef GU_ASIO_DEBUG_HPP #define GU_ASIO_DEBUG_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL // #define GU_ASIO_ENABLE_DEBUG #ifdef GU_ASIO_ENABLE_DEBUG #define GU_ASIO_DEBUG(msg_) log_info << msg_; #else #define GU_ASIO_DEBUG(msg_) #endif /* GU_ASIO_ENABLE_DEBUG */ #endif // GU_ASIO_DEBUG_HPP galera-4-26.4.25/galerautils/src/gu_enable_non_virtual_dtor.hpp000644 000164 177776 00000001424 15107057155 025722 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // // Note that there are no usual header guards because this header // may have to be included several times for compilation unit. /** * @file gu_enable_non_virtual_dtor.hpp * * This file accompanied with gu_disable_non_virtual_dtor.hpp * can be used to disable/enable -Wnon-virtual-dtor compiler warning * temporarily when it is not desirable to disable the warning completely * for compilation. * * This can be useful when using public inheritance from standard * library classes, especially std::enable_shared_from_this. */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif galera-4-26.4.25/galerautils/src/gu_event_service.cpp000644 000164 177776 00000001752 15107057155 023664 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2021 Codership Oy // #include "gu_event_service.hpp" #include // // Event service hooks. 
// namespace gu { std::mutex EventService::mutex; size_t EventService::usage(0); EventService* EventService::instance = nullptr; int EventService::init_v1(const wsrep_event_service_v1_t* es) { std::lock_guard lock(EventService::mutex); ++EventService::usage; if (EventService::instance) { assert(0); return 0; } EventService::instance = new EventService(es->context, es->event_cb); return 0; } void EventService::deinit_v1() { std::lock_guard lock(EventService::mutex); assert(EventService::usage > 0); --EventService::usage; if (EventService::usage == 0) { delete EventService::instance; EventService::instance = 0; } } } /* galera*/ galera-4-26.4.25/galerautils/src/gu_asio.hpp000644 000164 177776 00000057463 15107057155 021775 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2014-2024 Codership Oy // // // Common ASIO methods and configuration options for Galera // #ifndef GU_ASIO_HPP #define GU_ASIO_HPP #include "gu_config.hpp" #include "gu_uri.hpp" #include "gu_signals.hpp" #include "wsrep_allowlist_service.h" #include "wsrep_node_isolation.h" #include // tcp_info #include #include #include #include #include #include namespace gu { // URI schemes for networking namespace scheme { const std::string tcp("tcp"); /// TCP scheme const std::string udp("udp"); /// UDP scheme const std::string ssl("ssl"); /// SSL scheme const std::string def("tcp"); /// default scheme (TCP) } namespace conf { // Enable dynamic socket support const std::string socket_dynamic("socket.dynamic"); } #ifdef GALERA_HAVE_SSL // // SSL // // Configuration options for sockets namespace conf { /// Enable SSL explicitly const std::string use_ssl("socket.ssl"); /// SSL cipher list const std::string ssl_cipher("socket.ssl_cipher"); /// SSL compression algorithm const std::string ssl_compression("socket.ssl_compression"); /// SSL private key file const std::string ssl_key("socket.ssl_key"); /// SSL certificate file const std::string ssl_cert("socket.ssl_cert"); /// SSL CA file const std::string 
ssl_ca("socket.ssl_ca"); /// SSL password file const std::string ssl_password_file("socket.ssl_password_file"); // SSL reload const std::string ssl_reload("socket.ssl_reload"); } // register ssl parameters to config void ssl_register_params(gu::Config&); // initialize defaults, verify set options void ssl_init_options(gu::Config&); // update ssl parameters void ssl_param_set(const std::string&, const std::string&, gu::Config&); #else static inline void ssl_register_params(gu::Config&) { } static inline void ssl_init_options(gu::Config&) { } #endif // GALERA_HAVE_SSL // // Address manipulation helpers // /** * @class AsioIpAddressV4 * * A wrapper around asio::ip::address_v4 */ class AsioIpAddressV4 { public: AsioIpAddressV4(); AsioIpAddressV4(const AsioIpAddressV4&); AsioIpAddressV4& operator=(AsioIpAddressV4); ~AsioIpAddressV4(); bool is_multicast() const; class Impl; Impl& impl(); const Impl& impl() const; private: std::unique_ptr impl_; }; /** * @class AsioIpAddressV6 * * A wrapper around asio::ip::address_v6 */ class AsioIpAddressV6 { public: AsioIpAddressV6(); AsioIpAddressV6(const AsioIpAddressV6&); AsioIpAddressV6& operator=(AsioIpAddressV6); ~AsioIpAddressV6(); bool is_link_local() const; unsigned long scope_id() const; bool is_multicast() const; class Impl; Impl& impl(); const Impl& impl() const; private: std::unique_ptr impl_; }; /** * @class AsioIpAddressV6 * * A wrapper around asio::ip::address */ class AsioIpAddress { public: class Impl; AsioIpAddress(); AsioIpAddress(const AsioIpAddress&); AsioIpAddress& operator=(AsioIpAddress); ~AsioIpAddress(); bool is_v4() const; bool is_v6() const; AsioIpAddressV4 to_v4() const; AsioIpAddressV6 to_v6() const; Impl& impl(); const Impl& impl() const; private: std::unique_ptr impl_; }; // Return any address string. std::string any_addr(const AsioIpAddress& addr); // Escape address string. Surrounds IPv6 address with []. // IPv4 addresses not affected. 
std::string escape_addr(const AsioIpAddress& addr); // Unescape address string. Remove [] from around the address if found. std::string unescape_addr(const std::string& addr); // Construct asio::ip::address from address string AsioIpAddress make_address(const std::string& addr); class AsioMutableBuffer { public: AsioMutableBuffer() : data_(), size_() { } AsioMutableBuffer(void* data, size_t size) : data_(data) , size_(size) { } void* data() const { return data_; } size_t size() const { return size_; } private: void* data_; size_t size_; }; class AsioConstBuffer { public: AsioConstBuffer() : data_() , size_() { } AsioConstBuffer(const void* data, size_t size) : data_(data) , size_(size) { } const void* data() const { return data_; } size_t size() const { return size_; } private: const void* data_; size_t size_; }; class AsioErrorCategory; class AsioErrorCode { public: /** * A default constructor. Constructs success error code. */ AsioErrorCode(); /** * A constructor to generate error codes from system error codes. */ AsioErrorCode(int value); /** * A constructor to generate error codes from asio errors. */ AsioErrorCode(int value, const AsioErrorCategory& category) : value_(value) , category_(&category) , error_extra_() { } AsioErrorCode(int value, const AsioErrorCategory& category, int error_extra) : value_(value) , category_(&category) , error_extra_(error_extra) { } /** * Return error number. */ int value() const { return value_; } const AsioErrorCategory* category() const { return category_; } /** * Return human readable error message. */ std::string message() const; operator bool() const { return value_; } bool operator!() const { return value_ == 0; } static AsioErrorCode make_eof(); /** * Return true if the error is end of file. */ bool is_eof() const; /** * Return true if the error is system error. 
*/ bool is_system() const; private: int value_; const AsioErrorCategory* category_; // Extra category specific error information int error_extra_; }; std::ostream& operator<<(std::ostream&, const AsioErrorCode&); /* * Helper to determine if the error code originates from an * event which happens often and pollutes logs but for which the error * does not provide any helpful information. * * Errors which happen frequently during cluster configuration changes * and when connections break are considered verbose. * * Certain SSL errors such as 'short read' error are considered verbose. * * All errors which originate from TLS service hooks are considered * verbose, it is up to application report them if appropriate. */ bool is_verbose_error(const AsioErrorCode&); // TODO: Hide extra error info from public interface. It should be // called internally by calls which produce human readable error messages. #ifdef GALERA_HAVE_SSL std::string extra_error_info(const gu::AsioErrorCode& ec); #else // GALERA_HAVE_SSL static inline std::string extra_error_info(const gu::AsioErrorCode&) { return ""; } #endif // GALERA_HAVE_SSL class AsioSocket; /** * Abstract interface for asio socket handlers. */ class AsioSocketHandler { public: virtual ~AsioSocketHandler() { } /** * This will be called after asynchronous connection to the * remote endpoint after call to AsioSocket::async_connect() * completes, or after accepted socket becomes ready. * * All internal protocol handshakes (e.g. SSL) will be completed * before this handler is called. * * @param socket Reference to socket which initiated the call. * @param ec Error code. */ virtual void connect_handler(AsioSocket& socket, const AsioErrorCode& ec) = 0; virtual void write_handler(AsioSocket&, const AsioErrorCode&, size_t bytes_transferred) = 0; /** * This call is called every time more data has been written * into receive buffer submitted via async_read() call. 
* The return value of the call should be maximum number of * bytes to be transferred before the read is considered * complete and read_handler() will be called. * * If the returned value is larger than the available space in * read buffer, the maximum number of bytes to be transferred * will be the available space in read buffer. It is up to application * to resize the read buffer in read_handler() and restart async read * if the available space was not enough to contain the whole message. * * @param socket Stream socket associated to this handler. * @param ec Error code. * @param bytes_transferred Number of bytes transferred so far. * * @return Maximum number of bytes to read to complete the * read operation. */ virtual size_t read_completion_condition(AsioSocket& socket, const AsioErrorCode&, size_t bytes_transferred) = 0; virtual void read_handler(AsioSocket&, const AsioErrorCode&, size_t bytes_transferred) = 0; }; /** * @class AsioSocket * * Abstract interface for stream socket implementations. * * Although the interface provides both sync and async operations, * those should never be mixed. If the socket is connected * via connect() call (or accepted via AsioAcceptor::accept() call), * the underlying implementation uses blocking calls for * reading and writing. On the other hand, if async_connect() * or AsioAcceptor::async_accept() is used, the underlying implementation * uses non-blocking operations. */ class AsioSocket { public: AsioSocket() { } AsioSocket(const AsioSocket&) = delete; AsioSocket& operator=(const AsioSocket&) = delete; virtual ~AsioSocket() { } /** * Open the socket without connecting. */ virtual void open(const gu::URI& uri) = 0; /** * Return true if the socket is open. */ virtual bool is_open() const = 0; /** * Shutdown the socket. */ virtual void shutdown() = 0; /** * Close the socket. */ virtual void close() = 0; /** * Bind the socket to interface specified by address. 
*/ virtual void bind(const gu::AsioIpAddress&) = 0; // Asynchronous operations virtual void async_connect( const gu::URI& uri, const std::shared_ptr& handler) = 0; /** * Call once. Next call can be made after socket handler is called * with bytes transferred equal to last write size. */ virtual void async_write( const std::array&, const std::shared_ptr& handler) = 0; /** * Call once. Next call can be done from socket handler * read_handler or read_completion_condition. */ virtual void async_read( const AsioMutableBuffer&, const std::shared_ptr& handler) = 0; // Synchronous operations /** * Connect to remote endpoint specified by uri. * * @throw gu::Exception in case of error. */ virtual void connect(const gu::URI& uri) = 0; /** * Write contents of buffer into socket. This call blocks until * all data has been written or error occurs. * * @throw gu::Exception in case of error. */ virtual size_t write(const AsioConstBuffer& buffer) = 0; /** * Read data from socket into buffer. The value returned is the * number of bytes read so far. * * @throw gu::Exception in case of error. */ virtual size_t read(const AsioMutableBuffer& buffer) = 0; // Utility operations. /** * Return address URI of local endpoint. Return empty string * if not connected. */ virtual std::string local_addr() const = 0; /** * Return address URI of remote endpoint. Returns empty string * if not connected. */ virtual std::string remote_addr() const = 0; /** * Set receive buffer size for the socket. This must be called * before the socket is connected/accepted. */ virtual void set_receive_buffer_size(size_t) = 0; /** * Return currently effective receive buffer size. */ virtual size_t get_receive_buffer_size() = 0; /** * Set send buffer size for the socket. This must be called * before the socket is connected/accepted. */ virtual void set_send_buffer_size(size_t) = 0; /** * Return currently effective send buffer size. 
*/ virtual size_t get_send_buffer_size() = 0; /** * Read tcp_info struct from the underlying TCP socket. */ virtual struct tcp_info get_tcp_info() = 0; }; /** * Helper template to write buffer sequences. * * @todo This should probably be optimized by implementing * AsioSocket::write() overload which takes iterators to * buffer sequences. */ template size_t write(AsioSocket& socket, const ConstBufferSequence& bufs) { size_t written(0); for (auto b(bufs.begin()); b != bufs.end(); ++b) { if (b->size() > 0) { written += socket.write(AsioConstBuffer(b->data(), b->size())); } } return written; } class AsioDatagramSocket; class AsioDatagramSocketHandler { public: virtual ~AsioDatagramSocketHandler() { } virtual void read_handler(AsioDatagramSocket&, const AsioErrorCode&, size_t bytes_transferred) = 0; }; /** * @class AsioDatagramSocket * * Abstract interface for datagram socket implementations. */ class AsioDatagramSocket { public: AsioDatagramSocket() { } virtual ~AsioDatagramSocket() noexcept(false) { } /** * Open the socket. */ virtual void open(const URI&) = 0; /** * Connect the socket to desired local endpoint. The socket * will be bound to endpoint specified the uri. If the uri * contains a multicast address, the connect will join the * multicast group automatically. */ virtual void connect(const URI& uri) = 0; /** * Close the socket. If the socket was joined to multicast group, * the group is left automatically. */ virtual void close() = 0; /** * Performa a write to the socket. The write is best effort only * and the message can be dropped because of various reasons like * kernel send buffer being full, network dropping the packet or * receiving end(s) dropping the packet for whatever reason. * * The socket must be connected before writing into it. * If connect() is not called, send_to() can be used to send * datagram into desired address. * * @param bufs Array of two buffers. 
* * @throw gu::Exception If an other error than message being dropped * occurs, an exception containing the error code will be thrown. */ virtual void write(const std::array& bufs) = 0; /** * Send a datagram to destination given by target. Sending a * message is best effort only, the message may be dropped * because of whatever reason and no error is given if the * target endpoint does not exist. */ virtual void send_to(const std::array& bufs, const AsioIpAddress& target_host, unsigned short target_port) = 0; /** * Start asynchronous read from the socket. The socket handler * read_handler() method will be called for each complete message * which has been received. */ virtual void async_read(const AsioMutableBuffer&, const std::shared_ptr& handler) = 0; /** * Return address containing the local endpoint where the socket * was bound to. * * @todo Maybe this should be bind addr and corresponding call * connected_addr() should be introduced. */ virtual std::string local_addr() const = 0; }; class AsioAcceptor; class AsioAcceptorHandler { public: virtual ~AsioAcceptorHandler() { } virtual void accept_handler(AsioAcceptor&, const std::shared_ptr&, const gu::AsioErrorCode&) = 0; }; // Forward declaration for AsioAcceptor and make_socket() class AsioStreamEngine; /** @class AsioAcceptor * * Acceptor interface for stream sockets. */ class AsioAcceptor { public: AsioAcceptor() { } AsioAcceptor(const AsioAcceptor&) = delete; AsioAcceptor& operator=(const AsioAcceptor&) = delete; virtual ~AsioAcceptor() { } virtual void open(const gu::URI& uri) = 0; virtual bool is_open() const = 0; virtual void listen(const gu::URI& uri) = 0; virtual void close() = 0; virtual void async_accept(const std::shared_ptr&, const std::shared_ptr&, const std::shared_ptr& engine = nullptr) = 0; virtual std::shared_ptr accept() = 0; virtual std::string listen_addr() const = 0; virtual unsigned short listen_port() const = 0; /** * Set receive buffer size for the acceptor. 
This must be called * before listening. */ virtual void set_receive_buffer_size(size_t) = 0; /** * Return currently effective receive buffer size. */ virtual size_t get_receive_buffer_size() = 0; /** * Set send buffer size for the acceptor. This must be called * before listening. */ virtual void set_send_buffer_size(size_t) = 0; virtual size_t get_send_buffer_size() = 0; }; class AsioIoService { public: AsioIoService(const gu::Config& conf = gu::Config()); ~AsioIoService(); AsioIoService(const AsioIoService&) = delete; AsioIoService operator=(const AsioIoService&) = delete; /** * Handle global signals. */ void handle_signal(const gu::Signals::SignalType&); /** * Is dynamic socket enabled */ bool dynamic_socket_enabled() const { return dynamic_socket_; } /** * Is SSL enabled and configured */ bool ssl_enabled() const; /** * Load crypto context. */ void load_crypto_context(); /** * Run one IO service handler. */ void run_one(); /** * Run at most one IO service handler, return immediately * if no handlers are ready to run. */ void poll_one(); /** * Run until IO service is stopped or runs out of work. * * @return Number of events processed. */ size_t run(); /** * Post a function for execution. The function will be invoked * from inside run() or run_one(). */ void post(std::function); /** * Stop the IO service processing loop and return from run_one() * or run() calls as soon as possible. Call to reset() is required * to start processing via run_one() or run() after stop() has * been called. */ void stop(); /** * Reset the IO service for subsequent call to run() or run_one(). * This function must not be called from inside run() or run_one(). */ void reset(); /** * Make a new socket. The underlying transport will be * a stream socket (TCP, SSL). * * @param uri An URI describing a desired socket scheme. * @param handler Pointer to socket handler implementation. * * @return Shared pointer to AsioSocket. 
*/ std::shared_ptr make_socket( const gu::URI& uri, const std::shared_ptr& engine = nullptr); /** * Make a new datagram socket. The underlying transport * will be a datagram socket (UDP). * * @param uri An URI describing a desired socket scheme. * @param handler Pointer to socket handler implementation. * * @return Shared pointer to AsioDatagramSocket. */ std::shared_ptr make_datagram_socket( const gu::URI& uri); /** * Make a new acceptor. * * @param uri Uri describing a desired socket scheme. * @param acceptor_handler Pointer to acceptor handler implementation. * @param socket_handler Pointer to socket handler implementation. * * @return Shared pointer to AsioSocketAcceptor. */ std::shared_ptr make_acceptor(const gu::URI& uri); class Impl; Impl& impl(); private: std::unique_ptr impl_; const gu::Config& conf_; gu::Signals::signal_connection signal_connection_; bool dynamic_socket_; }; class AsioSteadyTimerHandler { public: virtual ~AsioSteadyTimerHandler() { } virtual void handle_wait(const AsioErrorCode&) = 0; }; #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) typedef std::chrono::monotonic_clock AsioClock; #else typedef std::chrono::steady_clock AsioClock; #endif // (__GNUC__ == 4 && __GNUC_MINOR__ == 4) class AsioSteadyTimer { public: AsioSteadyTimer(AsioIoService& io_service); ~AsioSteadyTimer(); AsioSteadyTimer(const AsioSteadyTimer&) = delete; AsioSteadyTimer& operator=(const AsioSteadyTimer&) = delete; void expires_from_now(const AsioClock::duration&); void async_wait(const std::shared_ptr&); void cancel(); private: class Impl; std::unique_ptr impl_; }; /* Allowlist check callback */ bool allowlist_value_check(wsrep_allowlist_key_t key, const std::string& value); /* Init/deinit global allowlist service hooks. */ int init_allowlist_service_v1(wsrep_allowlist_service_v1_t*); void deinit_allowlist_service_v1(); /* Global isolation mode. 
*/ extern std::atomic gu_asio_node_isolation_mode; } #endif // GU_ASIO_HPP galera-4-26.4.25/galerautils/src/gu_stats.hpp000644 000164 177776 00000001741 15107057155 022164 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef _gu_stats_hpp_ #define _gu_stats_hpp_ #include namespace gu { class Stats { public: Stats():n_(0), old_m_(), new_m_(), old_s_(), new_s_(), min_(), max_() {} void insert(const double); void clear() { n_ = 0; } unsigned int times() const { return n_; } double min() const; double max() const; double mean() const; double variance() const; double std_dev() const; friend std::ostream& operator<<(std::ostream&, const Stats&); std::string to_string() const; private: unsigned int n_; double old_m_; double new_m_; double old_s_; double new_s_; double min_; double max_; }; std::ostream& operator<<(std::ostream&, const Stats&); } #endif // _gu_stats_hpp_ galera-4-26.4.25/galerautils/src/gu_cond.hpp000644 000164 177776 00000003043 15107057155 021746 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy */ #ifndef __GU_COND__ #define __GU_COND__ #include "gu_threads.h" #include "gu_macros.h" #include "gu_exception.hpp" //#include #include // TODO: make exceptions more verbose namespace gu { class Cond { friend class Lock; // non-copyable Cond(const Cond&); void operator=(const Cond&); protected: gu_cond_t mutable cond; int mutable ref_count; public: Cond () : cond(), ref_count(0) { gu_cond_init (&cond, NULL); } ~Cond () { int ret; while (EBUSY == (ret = gu_cond_destroy(&cond))) { usleep (100); } if (gu_unlikely(ret != 0)) { log_fatal << "gu_cond_destroy() failed: " << ret << " (" << strerror(ret) << ". 
Aborting."; ::abort(); } } inline void signal () const { if (ref_count > 0) { int ret = gu_cond_signal (&cond); if (gu_unlikely(ret != 0)) throw Exception("gu_cond_signal() failed", ret); } } inline void broadcast () const { if (ref_count > 0) { int ret = gu_cond_broadcast (&cond); if (gu_unlikely(ret != 0)) throw Exception("gu_cond_broadcast() failed", ret); } } }; } #endif // __GU_COND__ galera-4-26.4.25/galerautils/src/gu_abort.h000644 000164 177776 00000000650 15107057155 021573 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2013 Codership Oy /** * @file "Clean" abort function to avoid stack and core dumps * * $Id$ */ #ifndef _gu_abort_h_ #define _gu_abort_h_ #ifdef __cplusplus extern "C" { #endif #include "gu_macros.h" /* This function is for clean aborts, when we can't gracefully exit otherwise */ extern void gu_abort() GU_NORETURN; #ifdef __cplusplus } #endif #endif /* _gu_abort_h_ */ galera-4-26.4.25/galerautils/src/gu_utils++.cpp000644 000164 177776 00000002032 15107057155 022301 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2011 Codership Oy /** * @file General-purpose functions and templates * * $Id$ */ #include "gu_utils.hpp" #include "gu_string_utils.hpp" #include namespace gu { bool _to_bool (const std::string& s) { std::istringstream iss(s); bool ret; if ((iss >> ret).fail()) { /* if 1|0 didn't work, try true|false */ iss.clear(); iss.seekg(0); if ((iss >> std::boolalpha >> ret).fail()) { /* try on/off and yes/no */ std::string tmp(s); gu::trim(tmp); if (tmp.length() >=2 && tmp.length() <= 3) { std::transform (tmp.begin(), tmp.end(), tmp.begin(), static_cast(std::tolower)); if (tmp == "yes" || tmp == "on") return true; if (tmp == "off" || tmp == "no") return false; } throw NotFound(); } } return ret; } } // namespace gu galera-4-26.4.25/galerautils/src/gu_str.h000644 000164 177776 00000010574 15107057155 021302 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010 Codership Oy */ #ifndef GU_STR_H #define GU_STR_H #include 
#include #include #include /*! * Append after position */ static inline char* gu_str_append(char* str, size_t* off, const char* app, size_t app_len) { char* tmp; assert(str == NULL || *(str + *off - 1) == '\0'); tmp = realloc(str, *off + app_len + 1); if (tmp != NULL) { memcpy(tmp + *off, app, app_len + 1); *off += app_len + 1; } return tmp; } /*! * Get next string after position */ static inline const char* gu_str_next(const char* str) { return strchr(str, '\0') + 1; } /*! * Advance position starting from over n */ static inline const char* gu_str_advance(const char* str, size_t n) { const char* ptr = str; while (n-- > 0) { ptr = gu_str_next(ptr); } return ptr; } /* * Utilities to construct and scan tables from null terminated strings. * The table format is the following: * * name\0\columns\0\rows\0 * colname0\0colname1\0... * elem00\0elem01\0elem02\0... * elem10\0elem11\0elem\12\... * . * . * . */ static inline char* gu_str_table_set_name(char* str, size_t* off, const char* name) { return gu_str_append(str, off, name, strlen(name)); } static inline const char* gu_str_table_get_name(const char* str) { return str; } static inline char* gu_str_table_append_size(char* str, size_t* off, size_t n) { char buf[10]; size_t len = snprintf(buf, sizeof(buf), "%zu", n); return gu_str_append(str, off, buf, len); } static inline char* gu_str_table_set_n_cols(char* str, size_t* off, size_t n) { return gu_str_table_append_size(str, off, n); } static inline size_t gu_str_table_get_n_cols(const char* str) { str = gu_str_advance(str, 1); return strtoul(str, NULL, 0); } static inline char* gu_str_table_set_n_rows(char* str, size_t* off, size_t n) { return gu_str_table_append_size(str, off, n); } static inline size_t gu_str_table_get_n_rows(const char* str) { str = gu_str_advance(str, 2); return strtoul(str, NULL, 0); } static inline char* gu_str_table_set_cols(char* str, size_t *off, size_t n, const char* cols[]) { size_t i; for (i = 0; i < n; ++i) { str = gu_str_append(str, off, 
cols[i], strlen(cols[i])); } return str; } static inline char* gu_str_table_append_row(char* str, size_t *off, size_t n, const char* row[]) { size_t i; for (i = 0; i < n; ++i) { str = gu_str_append(str, off, row[i], strlen(row[i])); } return str; } static inline const char* gu_str_table_get_cols(const char* str, size_t n, char const* row[]) { size_t i; str = gu_str_advance(str, 3); for (i = 0; i < n; i++) { row[i] = str; str = gu_str_next(str); } return str; } static inline const char* gu_str_table_rows_begin(const char* str, size_t n) { return gu_str_advance(str, 3 + n); } static inline const char* gu_str_table_row_get(const char* str, size_t n, char const* row[]) { size_t i; for (i = 0; i < n; ++i) { row[i] = str; str = gu_str_next(str); } return str; } static inline void gu_str_table_print_row(FILE* file, size_t n, const char* const row[]) { size_t i; for (i = 0; i < n; ++i) { fprintf(file, "%s ", row[i]); } fprintf(file, "\n"); } static inline void gu_str_table_print(FILE* file, const char* str) { size_t i; size_t n_cols, n_rows; const char* ptr; char const**vec; fprintf(file, "%s\n", gu_str_table_get_name(str)); n_cols = gu_str_table_get_n_cols(str); n_rows = gu_str_table_get_n_rows(str); vec = malloc(n_cols*sizeof(char*)); ptr = gu_str_table_get_cols(str, n_cols, vec); gu_str_table_print_row(file, n_cols, vec); for (i = 0; i < n_rows; ++i) { ptr = gu_str_table_row_get(ptr, n_cols, vec); gu_str_table_print_row(file, n_cols, vec); } free(vec); } #endif /* GU_STR_H */ galera-4-26.4.25/galerautils/src/gu_fifo.c000644 000164 177776 00000035433 15107057155 021411 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * Queue (FIFO) class implementation * * The driving idea behind this class is avoiding mallocs * at all costs on one hand, on the other - make it almost * as infinite as an ordinary linked list. FIFO properties * help achieving that. 
* * When needed this FIFO can be made very big, holding * millions or even billions of items while taking up * minimum space when there are few items in the queue. */ #define _DEFAULT_SOURCE #include #include #include #include #include "gu_assert.h" #include "gu_limits.h" #include "gu_mem.h" #include "gu_threads.h" #include "gu_log.h" #include "gu_fifo.h" #include "galerautils.h" struct gu_fifo { ulong col_shift; ulong col_mask; ulong rows_num; ulong head; ulong tail; ulong row_size; ulong length; ulong length_mask; ulong alloc; long get_wait; long put_wait; long long q_len; long long q_len_samples; uint item_size; uint used; uint used_max; uint used_min; int get_err; bool closed; #ifndef NDEBUG bool locked; #endif gu_mutex_t lock; gu_cond_t get_cond; gu_cond_t put_cond; void* rows[]; }; /* Don't make rows less than 1K */ #define GCS_FIFO_MIN_ROW_POWER 10 typedef unsigned long long ull; /* constructor */ gu_fifo_t *gu_fifo_create (size_t length, size_t item_size) { int row_pwr = GCS_FIFO_MIN_ROW_POWER; ull row_len = 1 << row_pwr; ull row_size = row_len * item_size; int array_pwr = 1; // need at least 2 rows for alteration ull array_len = 1 << array_pwr; ull array_size = array_len * sizeof(void*); gu_fifo_t *ret = NULL; if (length > 0 && item_size > 0) { /* find the best ratio of width and height: * the size of a row array must be equal to that of the row */ while (array_len * row_len < length) { if (array_size < row_size) { array_pwr++; array_len = 1 << array_pwr; array_size = array_len * sizeof(void*); } else { row_pwr++; row_len = 1 << row_pwr; row_size = row_len * item_size; } } ull alloc_size = array_size + sizeof (gu_fifo_t); if (sizeof(alloc_size) > sizeof(size_t) && alloc_size > SIZE_MAX) { gu_error ("Initial FIFO size %llu exceeds size_t range %zu", alloc_size, (size_t)-1); return NULL; } ull max_size = array_len * row_size + alloc_size; if (sizeof(max_size) > sizeof(size_t) && max_size > SIZE_MAX) { gu_error ("Maximum FIFO size %llu exceeds size_t range 
%zu", max_size, (size_t)-1); return NULL; } if (max_size > gu_avphys_bytes()) { gu_error ("Maximum FIFO size %llu exceeds available memory " "limit %zu", max_size, gu_avphys_bytes()); return NULL; } if ((array_len * row_len) > (ull)GU_LONG_MAX) { gu_error ("Resulting queue length %llu exceeds max allowed %ld", array_len * row_len, GU_LONG_MAX); return NULL; } gu_debug ("Creating FIFO buffer of %llu elements of size %zu, " "memory min used: %llu, max used: %llu", array_len * row_len, item_size, alloc_size, alloc_size + array_len*row_size); ret = gu_malloc (alloc_size); if (ret) { memset (ret, 0, alloc_size); ret->col_shift = row_pwr; ret->col_mask = row_len - 1; ret->rows_num = array_len; ret->length = row_len * array_len; ret->length_mask = ret->length - 1; ret->item_size = item_size; ret->row_size = row_size; ret->alloc = alloc_size; gu_mutex_init (&ret->lock, NULL); gu_cond_init (&ret->get_cond, NULL); gu_cond_init (&ret->put_cond, NULL); } else { gu_error ("Failed to allocate %llu bytes for FIFO", alloc_size); } } return ret; } // defined as macro for proper line reporting #ifdef NDEBUG #define fifo_lock(q) \ if (gu_likely (0 == gu_mutex_lock (&q->lock))) {} \ else { \ gu_fatal ("Failed to lock queue"); \ abort(); \ } #else /* NDEBUG */ #define fifo_lock(q) \ if (gu_likely (0 == gu_mutex_lock (&q->lock))) { \ q->locked = true; \ } \ else { \ gu_fatal ("Failed to lock queue"); \ abort(); \ } #endif /* NDEBUG */ static inline int fifo_unlock (gu_fifo_t* q) { #ifndef NDEBUG q->locked = false; #endif return -gu_mutex_unlock (&q->lock); } #ifndef NDEBUG bool gu_fifo_locked (gu_fifo_t* q) { return q->locked; } #endif /* lock the queue */ void gu_fifo_lock (gu_fifo_t *q) { fifo_lock(q); } /* unlock the queue */ void gu_fifo_release (gu_fifo_t *q) { fifo_unlock(q); } static int fifo_flush (gu_fifo_t* q) { int ret = 0; /* if there are items in the queue, wait until they are all fetched */ while (q->used > 0 && 0 == ret) { /* will make getters to signal every time item is 
removed */ gu_warn ("Waiting for %u items to be fetched.", q->used); q->put_wait++; ret = gu_cond_wait (&q->put_cond, &q->lock); } return ret; } static void fifo_close (gu_fifo_t* q) { if (!q->closed) { q->closed = true; /* force putters to quit */ /* don't overwrite existing get_err status, see gu_fifo_resume_gets() */ if (!q->get_err) q->get_err = -ENODATA; // signal all the idle waiting threads gu_cond_broadcast (&q->put_cond); q->put_wait = 0; gu_cond_broadcast (&q->get_cond); q->get_wait = 0; #if 0 (void) fifo_flush (q); #endif } } void gu_fifo_close (gu_fifo_t* q) { fifo_lock (q); fifo_close (q); fifo_unlock (q); } void gu_fifo_open (gu_fifo_t* q) { fifo_lock (q); q->closed = false; q->get_err = 0; fifo_unlock (q); } /* lock the queue and wait if it is empty */ static inline int fifo_lock_get (gu_fifo_t *q) { int ret = 0; fifo_lock(q); while (0 == ret && !(ret = q->get_err) && 0 == q->used) { q->get_wait++; #ifndef NDEBUG q->locked = false; #endif ret = -gu_cond_wait (&q->get_cond, &q->lock); #ifndef NDEBUG q->locked = true; #endif } return ret; } /* unlock the queue after getting item */ static inline int fifo_unlock_get (gu_fifo_t *q) { assert (q->used < q->length || 0 == q->length); if (q->put_wait > 0) { q->put_wait--; gu_cond_signal (&q->put_cond); } return fifo_unlock(q); } /* lock the queue and wait if it is full */ static inline int fifo_lock_put (gu_fifo_t *q) { int ret = 0; fifo_lock(q); while (0 == ret && q->used == q->length && !q->closed) { #ifndef NDEBUG q->locked = false; #endif q->put_wait++; ret = -gu_cond_wait (&q->put_cond, &q->lock); #ifndef NDEBUG q->locked = true; #endif } return ret; } /* unlock the queue after putting an item */ static inline int fifo_unlock_put (gu_fifo_t *q) { assert (q->used > 0); if (q->get_wait > 0) { q->get_wait--; gu_cond_signal (&q->get_cond); } return fifo_unlock(q); } #define FIFO_ROW(q,x) ((x) >> q->col_shift) /* div by row width */ #define FIFO_COL(q,x) ((x) & q->col_mask) /* remnant */ #define 
FIFO_PTR(q,x) \ ((uint8_t*)q->rows[FIFO_ROW(q, x)] + FIFO_COL(q, x) * q->item_size) /* Increment and roll over */ #define FIFO_INC(q,x) (((x) + 1) & q->length_mask) /*! If FIFO is not empty, returns pointer to the head item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. */ void* gu_fifo_get_head (gu_fifo_t* q, int* err) { *err = fifo_lock_get (q); if (gu_likely(-ECANCELED != *err && q->used)) { return (FIFO_PTR(q, q->head)); } else { assert (q->get_err); fifo_unlock (q); return NULL; } } /*! Unprotected helper for gu_fifo_pop_head() and gu_fifo_clear() */ static inline void fifo_advance_head (gu_fifo_t* q) { if (FIFO_COL(q, q->head) == q->col_mask) { /* removing last unit from the row */ ulong row = FIFO_ROW (q, q->head); assert (q->rows[row] != NULL); gu_free (q->rows[row]); q->rows[row] = NULL; q->alloc -= q->row_size; } q->head = FIFO_INC(q, q->head); q->used--; if (gu_unlikely(q->used < q->used_min)) { q->used_min = q->used; } } /*! Advances FIFO head and unlocks FIFO. */ void gu_fifo_pop_head (gu_fifo_t* q) { fifo_advance_head(q); if (fifo_unlock_get(q)) { gu_fatal ("Faled to unlock queue to get item."); abort(); } } /*! If FIFO is not full, returns pointer to the tail item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. */ void* gu_fifo_get_tail (gu_fifo_t* q) { fifo_lock_put (q); if (gu_likely(!q->closed)) { // stop adding items when closed ulong row = FIFO_ROW(q, q->tail); assert (q->used < q->length); // check if row is allocated and allocate if not. 
if (NULL == q->rows[row] && NULL == (q->alloc += q->row_size, q->rows[row] = gu_malloc(q->row_size))) { q->alloc -= q->row_size; } else { return ((uint8_t*)q->rows[row] + FIFO_COL(q, q->tail) * q->item_size); } #if 0 // for debugging if (NULL == q->rows[row]) { gu_debug ("Allocating row %lu of queue %p, rows %p", row, q, q->rows); if (NULL == (q->rows[row] = gu_malloc(q->row_size))) { gu_debug ("Allocating row %lu failed", row); fifo_unlock (q); return NULL; } q->alloc += q->row_size; } return (q->rows[row] + FIFO_COL(q, q->tail) * q->item_size); #endif } fifo_unlock (q); return NULL; } /*! Advances FIFO tail and unlocks FIFO. */ void gu_fifo_push_tail (gu_fifo_t* q) { q->tail = FIFO_INC(q, q->tail); q->q_len += q->used; q->used++; if (gu_unlikely(q->used > q->used_max)) { q->used_max = q->used; } q->q_len_samples++; if (fifo_unlock_put(q)) { gu_fatal ("Faled to unlock queue to put item."); abort(); } } /*! returns how many items are in the queue */ long gu_fifo_length (gu_fifo_t* q) { return q->used; } /*! 
returns how many items were in the queue per push_tail() */ void gu_fifo_stats_get (gu_fifo_t* q, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg) { fifo_lock (q); *q_len = q->used; *q_len_max = q->used_max; *q_len_min = q->used_min; long long len = q->q_len; long long samples = q->q_len_samples; fifo_unlock (q); if (len >= 0 && samples >= 0) { if (samples > 0) { *q_len_avg = ((double)len) / samples; } else { assert (0 == len); *q_len_avg = 0.0; } } else { *q_len_avg = -1.0; } } void gu_fifo_stats_flush(gu_fifo_t* q) { fifo_lock (q); q->used_max = q->used; q->used_min = q->used; q->q_len = 0; q->q_len_samples = 0; fifo_unlock (q); } void gu_fifo_clear(gu_fifo_t* q) { fifo_lock (q); while (q->used > 0) fifo_advance_head(q); fifo_unlock (q); } /* destructor - would block until all members are dequeued */ void gu_fifo_destroy (gu_fifo_t *queue) { fifo_lock (queue); { if (!queue->closed) fifo_close(queue); fifo_flush (queue); } fifo_unlock (queue); assert (queue->tail == queue->head); while (gu_cond_destroy (&queue->put_cond)) { fifo_lock (queue); gu_cond_signal (&queue->put_cond); fifo_unlock (queue); /* when thread sees that ret->used == 0, it must terminate */ } while (gu_cond_destroy (&queue->get_cond)) { fifo_lock (queue); gu_cond_signal (&queue->get_cond); fifo_unlock (queue); /* when thread sees that ret->used == 0, it must terminate */ } while (gu_mutex_destroy (&queue->lock)) continue; /* only one row might be left */ { ulong row = FIFO_ROW(queue, queue->tail); if (queue->rows[row]) { assert (FIFO_COL(queue, queue->tail) != 0); gu_free (queue->rows[row]); queue->alloc -= queue->row_size; } else { assert (FIFO_COL(queue, queue->tail) == 0); } gu_free (queue); } } char *gu_fifo_print (gu_fifo_t *queue) { size_t tmp_len = 4096; char tmp[tmp_len]; char *ret; snprintf (tmp, tmp_len, "Queue (%p):\n" "\tlength = %lu\n" "\trows = %lu\n" "\tcolumns = %lu\n" "\tused = %u (%zu bytes)\n" "\talloctd = %lu bytes\n" "\thead = %lu, tail = %lu\n" "\tavg.len = %f" 
//", next = %lu" , (void*)queue, queue->length, queue->rows_num, queue->col_mask + 1, queue->used, (size_t)queue->used * queue->item_size, queue->alloc, queue->head, queue->tail, queue->q_len_samples > 0 ? ((double)queue->q_len)/queue->q_len_samples : 0.0 //, queue->next ); ret = strdup (tmp); return ret; } int gu_fifo_cancel_gets (gu_fifo_t* q) { assert(q->locked); if (q->get_err && -ENODATA != q->get_err) { gu_error ("Attempt to cancel FIFO gets in state: %d (%s)", q->get_err, strerror(-q->get_err)); return -EBADFD; } assert (!q->get_err || q->closed); q->get_err = -ECANCELED; /* force getters to quit with specific error */ if (q->get_wait) { gu_cond_broadcast (&q->get_cond); q->get_wait = 0; } return 0; } int gu_fifo_resume_gets (gu_fifo_t* q) { int ret = -1; fifo_lock(q); if (-ECANCELED == q->get_err) { q->get_err = q->closed ? -ENODATA : 0; ret = 0; } else { gu_error ("Attempt to resume FIFO gets in state: %d (%s)", q->get_err, strerror(-q->get_err)); ret = -EBADFD; } fifo_unlock(q); return ret; } galera-4-26.4.25/galerautils/src/gu_threads.c000644 000164 177776 00000017656 15107057155 022127 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2017 Codership Oy /** * Debug versions of thread functions */ #include "gu_threads.h" #include "gu_macros.h" #include "gu_log.h" #include #include #include // strerror() #ifdef GU_DEBUG_MUTEX int gu_mutex_init_DBG (gu_mutex_t_DBG *m, const gu_mutexattr_t_SYS* attr, const char *file, unsigned int line) { gu_mutex_init_SYS(&m->mutex, attr); gu_cond_init_SYS(&m->cond, NULL); m->thread = gu_thread_self_SYS(); m->file = file; m->line = line; m->waiter_count = 0; m->cond_waiter_count = 0; m->locked = false; return 0; // as per pthread spec } static inline void _wait_unlocked(gu_mutex_t_DBG* m) { m->waiter_count++; gu_cond_wait_SYS(&m->cond, &m->mutex); assert(m->waiter_count > 0); m->waiter_count--; } int gu_mutex_lock_DBG(gu_mutex_t_DBG *m, const char *file, unsigned int line) { gu_thread_t_SYS const self = 
gu_thread_self_SYS(); int const err = gu_mutex_lock_SYS(&m->mutex); if (gu_likely(0 == err)) { while (m->locked) { if (gu_thread_equal_SYS(self, m->thread)) { gu_fatal("Second mutex lock attempt by the same thread, %lx, " "at %s:%d, first locked at %s:%d", self, file, line, m->file, m->line); abort(); } _wait_unlocked(m); } m->locked = true; m->thread = self; m->file = file; m->line = line; gu_mutex_unlock_SYS(&m->mutex); } return err; } int gu_mutex_unlock_DBG (gu_mutex_t_DBG *m, const char *file, unsigned int line) { gu_thread_t_SYS const self = gu_thread_self_SYS(); int err = gu_mutex_lock_SYS(&m->mutex); if (gu_likely(0 == err)) { if (m->locked && !gu_thread_equal_SYS(self, m->thread)) { /** last time pthread_t was unsigned long int */ gu_fatal ("%lx attempts to unlock mutex at %s:%d " "locked by %lx at %s:%d", self, file, line, m->thread, m->file, m->line); assert(0); err = EPERM; /** return in case assert is undefined */ } /** must take into account that mutex unlocking can happen in * cleanup handlers when thread is terminated in cond_wait(). * Then holder_count would still be 0 (see gu_cond_wait()), * but cond_waiter - not */ else if (!m->locked && m->cond_waiter_count == 0) { gu_error ("%lx attempts to unlock unlocked mutex at %s:%d. " "Last use at %s:%d", self, file, line, m->file ? 
m->file : "" , m->line); assert(0 == m->waiter_count); assert(0); } else { m->file = file; m->line = line; m->locked = false; if (m->waiter_count > 0) gu_cond_signal_SYS(&m->cond); } gu_mutex_unlock_SYS(&m->mutex); } return err; } int gu_mutex_destroy_DBG (gu_mutex_t_DBG *m, const char *file, unsigned int line) { gu_thread_t_SYS const self = gu_thread_self_SYS(); int err = gu_mutex_lock_SYS(&m->mutex); if (gu_likely(0 == err)) { if (!m->file) { gu_fatal("%lx attempts to destroy uninitialized mutex at %s:%d", self, file, line); assert(0); } else if (m->locked) { if (gu_thread_equal_SYS(self, m->thread)) { gu_error ("%lx attempts to destroy mutex locked by " "itself at %s:%d", self, m->file, m->line); } else { gu_error ("%lx attempts to destroy a mutex at %s:%d " "locked by %lu at %s:%d (not error)", self, file, line, m->thread, m->file, m->line); } assert (0); /* logical error in program */ err = EBUSY; } else if (m->cond_waiter_count != 0) { gu_error ("%lx attempts to destroy a mutex at %s:%d " "that is waited by %d thread(s)", self, file, line, m->cond_waiter_count); assert (m->cond_waiter_count > 0); abort(); } else { assert(!m->locked); assert(0 == m->cond_waiter_count); gu_mutex_unlock_SYS(&m->mutex); if ((err = gu_mutex_destroy_SYS(&m->mutex))) { gu_debug("Error (%d: %s, %d) during mutex destroy at %s:%d", err, strerror(err), errno, file, line); } else { gu_cond_destroy_SYS(&m->cond); m->file = NULL; m->line = 0; m->thread = GU_THREAD_INITIALIZER_SYS; } return err; } gu_mutex_unlock_SYS(&m->mutex); } return err; } int gu_cond_twait_DBG (gu_cond_t_SYS *cond, gu_mutex_t_DBG *m, const struct timespec *abstime, const char *file, unsigned int line) { gu_thread_t_SYS const self = gu_thread_self_SYS(); int err = gu_mutex_lock_SYS(&m->mutex); if (gu_likely(!err)) { if (gu_unlikely(!m->locked && 0 == m->cond_waiter_count)) { gu_fatal ("%lx tries to wait for condition on unlocked mutex " "at %s %d", self, file, line); assert (0); } else if (!gu_thread_equal_SYS(self, 
m->thread)) { gu_fatal ("%lx tries to wait for condition on the mutex locked " "by %lx at %s %d", self, m->thread, file, line); assert (0); } /** gu_cond_wait_SYS frees the mutex */ m->locked = false; m->thread = self; m->file = file; m->line = line; if (m->waiter_count > 0) gu_cond_signal_SYS(&m->cond); m->cond_waiter_count++; if (NULL == abstime) err = gu_cond_wait_SYS (cond, &m->mutex); else err = gu_cond_timedwait_SYS (cond, &m->mutex, abstime); assert(m->cond_waiter_count > 0); m->cond_waiter_count--; /* now wait till the the mutex is "unlocked" */ while (m->locked && 0 == err) { _wait_unlocked(m); } m->locked = true; assert(!gu_thread_equal_SYS(self, m->thread) || 0 != err); m->thread = self; m->file = file; m->line = line; gu_mutex_unlock_SYS(&m->mutex); } return err; } #endif /* GU_DEBUG_MUTEX */ #if defined(__APPLE__) int gu_barrier_init_SYS (gu_barrier_t_SYS *barrier, const gu_barrierattr_t_SYS *attr, unsigned int count) { if(count == 0) { errno = EINVAL; return -1; } if(gu_mutex_init_SYS (&barrier->mutex, 0) < 0) { return -1; } if(gu_cond_init_SYS (&barrier->cond, 0) < 0) { gu_mutex_destroy_SYS (&barrier->mutex); return -1; } barrier->tripCount = count; barrier->count = 0; return 0; } int gu_barrier_destroy_SYS (gu_barrier_t_SYS *barrier) { gu_cond_destroy_SYS (&barrier->cond); gu_mutex_destroy_SYS (&barrier->mutex); return 0; } int gu_barrier_wait_SYS (gu_barrier_t_SYS *barrier) { gu_mutex_lock_SYS (&barrier->mutex); ++(barrier->count); if(barrier->count >= barrier->tripCount) { barrier->count = 0; gu_cond_broadcast_SYS (&barrier->cond); gu_mutex_unlock_SYS (&barrier->mutex); return GU_BARRIER_THREAD_SYS; } else { gu_cond_wait_SYS (&barrier->cond, &(barrier->mutex)); gu_mutex_unlock_SYS (&barrier->mutex); return !GU_BARRIER_THREAD_SYS; } } #endif /* __APPLE__ */ galera-4-26.4.25/galerautils/src/gu_datetime.hpp000644 000164 177776 00000016771 15107057155 022633 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ /*! 
* @file Date/time manipulation classes providing nanosecond resolution. */ #ifndef __GU_DATETIME__ #define __GU_DATETIME__ #include "gu_exception.hpp" #include "gu_time.h" #include #include #include namespace gu { namespace datetime { /* Multiplier constants */ const long long NSec = 1; const long long USec = 1000*NSec; const long long MSec = 1000*USec; const long long Sec = 1000*MSec; const long long Min = 60*Sec; const long long Hour = 60*Min; const long long Day = 24*Hour; const long long Month = 30*Day; const long long Year = 12*Month; /*! * @brief Class representing time periods instead of * system clock time. */ class Period { public: /*! * @brief Construct gu::datetime::Period from string * * This constructor accepts a string that contains a duration * represented in ISO8601 format. Alternatively, it accepts a * string that represents a double duration in number of seconds. * * The ISO8601 duration format is PnYnMnDTnHnMnS where Y is year, * M is month, D is day, T is the time designator separating date * and time parts, H denotes hours, M (after T) is minutes and S * seconds. * * All other n:s are expected to be integers except the one * before S which can be decimal to represent fractions of second. * * @param str Time period represented in ISO8601 duration format, * or number of seconds represented as double. 
* * @throws NotFound */ Period(const std::string& str = "") : nsecs() { if (str != "") parse(str); } Period(const long long nsecs_) : nsecs(nsecs_) { } static Period min() { return 0; } static Period max() { return std::numeric_limits::max();} bool operator==(const Period& cmp) const { return (nsecs == cmp.nsecs); } bool operator<(const Period& cmp) const { return (nsecs < cmp.nsecs); } bool operator>=(const Period& cmp) const { return !(*this < cmp); } Period operator+(const long long add) const { return (nsecs + add); } Period operator-(const long long dec) const { return (nsecs - dec); } Period operator*(const long long mul) const { return (nsecs*mul); } Period operator/(const long long div) const { return (nsecs/div); } long long get_nsecs() const { return nsecs; } Period operator+(const Period& add) const { return (nsecs + add.nsecs); } Period operator-(const Period& dec) const { return (nsecs - dec.nsecs); } private: friend class Date; friend std::istream& operator>>(std::istream&, Period&); /*! * @brief Parse period string. */ void parse(const std::string&); long long nsecs; }; // Clock simulation for unit tests which need determinism. class SimClock { public: /* Init with start time */ static void init(long long start_time) { counter_ = start_time; initialized_ = true; } /* Return true if has been initialized. */ static bool initialized() { return initialized_; } /* Get current time */ static long long get_time() { return counter_; } /* Increment time with step nanoseconds. */ static void inc_time(long long step) { counter_ += step; } private: static long long counter_; static bool initialized_; }; /*! * @brief Date/time representation. * * @todo Parsing date from string is not implemented yet, * only possible to get current system time or * maximum time. */ class Date { public: /*! * @brief Get time from system-wide realtime clock. 
*/ static inline Date calendar() { if (SimClock::initialized()) return SimClock::get_time(); else return gu_time_calendar(); } /*! * @brief Get time from monotonic clock. */ static inline Date monotonic() { if (SimClock::initialized()) return SimClock::get_time(); else return gu_time_monotonic(); } /*! * @brief Get maximum representable timestamp. */ static inline Date max() { return std::numeric_limits::max(); } /*! * @brief Get zero time */ static inline Date zero() { return 0; } /*! * Return 64-bit timestamp representing system time in nanosecond * resolution. */ long long get_utc() const { return utc; } /* Standard comparison operators */ bool operator==(const Date cmp) const { return (utc == cmp.utc); } bool operator<(const Date cmp) const { return (utc < cmp.utc); } /*! * @brief Add period to Date */ Date operator+(const Period& add) const { return (utc + add.get_nsecs()); } /*! * @brief Decrement period from Date */ Date operator-(const Period& dec) const { return (utc - dec.get_nsecs()); } Period operator-(const Date& dec) const { return (utc - dec.utc); } Date(const long long utc_ = 0) : utc(utc_) { } /*! convert to timespec - for internal use */ void _timespec(timespec& ts) const { ts.tv_sec = utc / 1000000000L; ts.tv_nsec = utc % 1000000000L; } private: long long utc; /*!< System time in nanosecond precision */ /*! * @brief Parse date from string. * @todo Not implemented yet */ void parse(const std::string& str_); }; /*! * @brief Output operator for Date class. * @todo Not implemented yet */ std::ostream& operator<<(std::ostream&, const Date&); /*! * @brief Output operator for Period type. 
*/ std::ostream& operator<<(std::ostream&, const Period&); inline std::string to_string(const Period& p) { std::ostringstream os; os << p; return os.str(); } inline double to_double(const Period& p) { return static_cast(p.get_nsecs()) / Sec; } inline std::istream& operator>>(std::istream& is, Period& p) { std::string str; is >> str; p.parse(str); return is; } } // namespace datetime } // namespace gu #endif // __GU_DATETIME__ galera-4-26.4.25/galerautils/src/gu_mem_pool.hpp000644 000164 177776 00000012754 15107057155 022643 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy */ /** * @file Self-adjusting pool of same size memory buffers. * * How it works: pool is never allowed to keep more than half of total * allocated buffers (plus min_count), so at least half of buffers must be * in use. As more than half goes out of use they will be deallocated rather * than placed back in the pool. * * $Id$ */ #ifndef _GU_MEM_POOL_HPP_ #define _GU_MEM_POOL_HPP_ #include "gu_lock.hpp" #include "gu_macros.hpp" #include #include #include namespace gu { typedef std::vector MemPoolVector; /* Since we specialize this template iwth thread_safe=true parameter below, * this makes it implicit thread_safe=false specialization. 
*/ template class MemPool { public: explicit MemPool(int buf_size, int reserve = 0, const char* name = "") : pool_ (), hits_ (0), misses_ (0), allocd_ (0), name_ (name), buf_size_ (buf_size), reserve_ (reserve) { assert(buf_size_ > 0); assert(reserve >= 0); pool_.reserve(reserve_); } ~MemPool() { /* all buffers must be returned to pool before destruction */ assert(pool_.size() == allocd_); for (size_t i(0); i < pool_.size(); ++i) { assert(pool_[i]); free(pool_[i]); } } void* acquire() { void* ret(from_pool()); if (!ret) ret = alloc(); return ret; } void recycle(void* buf) { if (!to_pool(buf)) free(buf); } void print(std::ostream& os) const { double hr(hits_); if (hr > 0) { assert(misses_ > 0); hr /= hits_ + misses_; } os << "MemPool(" << name_ << "): hit ratio: " << hr << ", misses: " << misses_ << ", in use: " << allocd_ - pool_.size() << ", in pool: " << pool_.size(); } size_t buf_size() const { return buf_size_; } protected: /* from_pool() and to_pool() will need to be called under mutex * in thread-safe version, so all object data are modified there. * alloc() and free() then can be called outside critical section. */ void* from_pool() { void* ret(NULL); if (pool_.size() > 0) { ret = pool_.back(); assert(ret); pool_.pop_back(); ++hits_; } else { ++allocd_; ++misses_; } return ret; } // returns false if buffer can't be returned to pool bool to_pool(void* buf) { assert(buf); bool const ret(reserve_ + allocd_/2 > pool_.size()); if (ret) { pool_.push_back(buf); } else { assert(allocd_ > 0); --allocd_; } return ret; } void* alloc() { return (operator new(buf_size_)); } void free(void* const buf) { assert(buf); operator delete(buf); } friend class MemPool; private: MemPoolVector pool_; size_t hits_; size_t misses_; size_t allocd_; const char* const name_; unsigned int const buf_size_; unsigned int const reserve_; MemPool (const MemPool&); MemPool operator= (const MemPool&); }; /* class MemPool: thread-unsafe */ /* Thread-safe MemPool specialization. 
* Even though MemPool technically IS-A MemPool, the need to * overload nearly all public methods and practical uselessness of * polymorphism in this case make inheritance undesirable. */ template <> class MemPool { public: explicit MemPool(int buf_size, int reserve = 0, const char* name = "") : base_(buf_size, reserve, name), mtx_ () {} ~MemPool() {} void* acquire() { void* ret; { Lock lock(mtx_); ret = base_.from_pool(); } if (!ret) ret = base_.alloc(); return ret; } void recycle(void* buf) { bool pooled; { Lock lock(mtx_); pooled = base_.to_pool(buf); } if (!pooled) base_.free(buf); } void print(std::ostream& os) const { Lock lock(mtx_); base_.print(os); } size_t buf_size() const { return base_.buf_size(); } private: MemPool base_; Mutex mtx_; }; /* class MemPool: thread-safe */ template std::ostream& operator << (std::ostream& os, const MemPool& mp) { mp.print(os); return os; } typedef MemPool MemPoolUnsafe; typedef MemPool MemPoolSafe; } /* namespace gu */ #endif /* _GU_MEM_POOL_HPP_ */ galera-4-26.4.25/galerautils/src/gu_uri.cpp000644 000164 177776 00000020357 15107057155 021624 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! @todo: scheme and host parts should be normalized to lower case except for * %-encodings, which should go upper-case */ #include #include #include #include "gu_assert.h" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_string_utils.hpp" // strsplit() #include "gu_uri.hpp" using std::string; using std::vector; using std::multimap; static void parse_authority (const string& auth, gu::RegEx::Match& user, gu::RegEx::Match& host, gu::RegEx::Match& port) { size_t pos1, pos2; pos1 = auth.find_first_of ('@'); if (pos1 != string::npos) { user = gu::RegEx::Match (auth.substr(0, pos1)); pos1 += 1; // pos1 now points past the first occurence of @, // which may be past the end of the string. 
} else { pos1 = 0; } if (auth[pos1] == '[') { size_t close_bracket = auth.find_first_of(']', pos1); if (close_bracket == string::npos) { gu_throw_error (EINVAL) << "Expected ']' in " << auth; } pos2 = string::npos; if (close_bracket < auth.length() && auth[close_bracket + 1] == ':') { pos2 = close_bracket + 1; } } else { pos2 = auth.find_last_of (':'); if (auth.find_first_of (':') != pos2) { pos2 = string::npos; } } if (pos2 != string::npos && pos2 >= pos1) { host = gu::RegEx::Match (auth.substr (pos1, pos2 - pos1)); // according to RFC 3986 empty port (nothing after :) should be treated // as unspecified, so make sure that it is not 0-length. if ((pos2 + 1) < auth.length()) { port = gu::RegEx::Match (auth.substr (pos2 + 1)); if ((port.str().find_first_not_of ("0123456789") != string::npos) || // @todo: possible port range is not determined in RFC 3986 (65535 < gu::from_string (port.str()))) { log_debug << "\n\tauth: '" << auth << "'" << "\n\thost: '" << host.str() << "'" << "\n\tport: '" << port.str() << "'" << "\n\tpos1: " << pos1 << ", pos2: " << pos2; gu_throw_error (EINVAL) << "Can't parse port number from '" << port.str() << "'"; } } } else { host = gu::RegEx::Match (auth.substr (pos1)); } } static gu::URIQueryList extract_query_list(const string& s, const string& query) { gu::URIQueryList ret; // scan all key=value pairs vector qlist = gu::strsplit(query, '&'); for (vector::const_iterator i = qlist.begin(); i != qlist.end(); ++i) { vector kvlist = gu::strsplit(*i, '='); if (kvlist.size() != 2) { gu_throw_error (EINVAL) << "Invalid URI query part: '" << *i << "'"; } ret.insert(make_pair(kvlist[0], kvlist[1])); } return ret; } gu::URI::URI(const string& uri_str, bool const strict) : modified_ (true), // recompose to normalize on the first call to_string() str_ (uri_str), scheme_ (), authority_ (), path_ (), fragment_ (), query_list_() { parse(uri_str, strict); } /*! 
regexp suggested by RFC 3986 to parse URI into 5 canonical parts */ const char* const gu::URI::uri_regex_ = "^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?"; /* 12 3 4 5 6 7 8 9 */ /*! positions of URI components as matched by the above pattern */ enum { SCHEME = 2, AUTHORITY = 4, PATH = 5, QUERY = 7, FRAGMENT = 9, NUM_PARTS }; gu::RegEx const gu::URI::regex_(uri_regex_); static string const UNSET_SCHEME("unset://"); void gu::URI::parse (const string& uri_str, bool const strict) { log_debug << "URI: " << uri_str; vector parts; if (!strict && uri_str.find("://") == std::string::npos) { string tmp = UNSET_SCHEME + uri_str; parts = regex_.match (tmp, NUM_PARTS); } else { parts = regex_.match (uri_str, NUM_PARTS); scheme_ = parts[SCHEME]; //set scheme only if it was explicitly provided } if (strict && (!scheme_.is_set() || !scheme_.str().length())) { gu_throw_error (EINVAL) << "URI '" << uri_str << "' has empty scheme"; } try { std::vector auth_list( strsplit(parts[AUTHORITY].str(), ',')); for (size_t i(0); i < auth_list.size(); ++i) { Authority auth; parse_authority (auth_list[i], auth.user_, auth.host_, auth.port_); authority_.push_back(auth); } } catch (NotSet&) { authority_.push_back(Authority()); } path_ = parts[PATH]; if (!parts[AUTHORITY].is_set() && !path_.is_set()) { gu_throw_error (EINVAL) << "URI '" << uri_str << "' has no hierarchical part"; } try { query_list_ = extract_query_list(str_, parts[QUERY].str()); } catch (NotSet&) {} fragment_ = parts[FRAGMENT]; #if 0 try { log_debug << "Base URI: " << scheme.str() << "://" << get_authority(); } catch (NotSet&) {} #endif } std::string gu::URI::get_authority(const gu::URI::Authority& authority) const { const RegEx::Match& user(authority.user_); const RegEx::Match& host(authority.host_); const RegEx::Match& port(authority.port_); if (!user.is_set() && !host.is_set()) throw NotSet(); size_t auth_len = 0; if (user.is_set()) auth_len += user.str().length() + 1; if (host.is_set()) { auth_len += 
host.str().length(); if (port.is_set()) auth_len += port.str().length() + 1; } string auth; auth.reserve (auth_len); if (user.is_set()) { auth += user.str(); auth += '@'; } if (host.is_set()) { auth += host.str(); if (port.is_set()) { auth += ':'; auth += port.str(); } } return auth; } string gu::URI::get_authority() const { if (authority_.empty()) return ""; return get_authority(authority_.front()); } void gu::URI::recompose() const { size_t l = str_.length(); str_.clear (); str_.reserve (l); // resulting string length will be close to this if (scheme_.is_set()) { str_ += scheme_.str(); str_ += ':'; } str_ += "//"; for (AuthorityList::const_iterator i(authority_.begin()); i != authority_.end(); ++i) { AuthorityList::const_iterator i_next(i); ++i_next; try { string auth = get_authority(*i); str_ += auth; } catch (NotSet&) {} if (i_next != authority_.end()) str_ += ","; } if (path_.is_set()) str_ += path_.str(); if (query_list_.size() > 0) { str_ += '?'; } URIQueryList::const_iterator i = query_list_.begin(); while (i != query_list_.end()) { str_ += i->first + '=' + i->second; URIQueryList::const_iterator i_next = i; ++i_next; if (i_next != query_list_.end()) { str_ += '&'; } i = i_next; } if (fragment_.is_set()) { str_ += '#'; str_ += fragment_.str(); } } void gu::URI::set_query_param(const string& key, const string& val, bool override) { if (override == false) { query_list_.insert(make_pair(key, val)); } else { URIQueryList::iterator i(query_list_.find(key)); if (i == query_list_.end()) { query_list_.insert(make_pair(key, val)); } else { i->second = val; } } modified_ = true; } const std::string& gu::URI::get_option (const std::string& name) const { gu::URIQueryList::const_iterator i = query_list_.find(name); if (i == query_list_.end()) throw NotFound(); return i->second; } galera-4-26.4.25/galerautils/src/gu_signals.cpp000644 000164 177776 00000000647 15107057155 022465 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2021 Codership Oy #include 
"gu_signals.hpp" #include gu::Signals& gu::Signals::Instance() { static gu::Signals instance; return instance; } gu::Signals::signal_connection gu::Signals::connect( const gu::Signals::slot_type &subscriber) { return signal_.connect(subscriber); } void gu::Signals::signal(const gu::Signals::SignalType& type) { signal_(type); } galera-4-26.4.25/galerautils/src/gu_serialize.cpp000644 000164 177776 00000002100 15107057155 022776 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2017 Codership Oy */ #include "gu_serialize.hpp" #include namespace gu { class serialization_error_message { std::ostringstream os_; public: serialization_error_message(size_t a, size_t b) : os_() { os_ << a << " > " << b; } const std::ostringstream& os() const { return os_; } }; SerializationException::SerializationException(size_t a, size_t b) : Exception(serialization_error_message(a, b).os().str(), EMSGSIZE) {} class representation_error_message { std::ostringstream os_; public: representation_error_message(size_t need, size_t have) : os_() { os_ << need << " unrepresentable in " << have <<" bytes."; } const std::ostringstream& os() const { return os_; } }; RepresentationException::RepresentationException(size_t need, size_t have) : Exception(representation_error_message(need, have).os().str(), ERANGE) {} } /* namespace gu */ galera-4-26.4.25/galerautils/src/gu_asio_stream_react.cpp000644 000164 177776 00000101062 15107057155 024502 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020-2024 Codership Oy // #define GU_ASIO_IMPL #include "gu_asio_stream_react.hpp" #include "gu_asio_debug.hpp" #include "gu_asio_error_category.hpp" #include "gu_asio_io_service_impl.hpp" #include "gu_asio_socket_util.hpp" #include "gu_asio_utils.hpp" #ifndef ASIO_HAS_BOOST_BIND #define ASIO_HAS_BOOST_BIND #endif // ASIO_HAS_BOOST_BIND #include "asio/placeholders.hpp" #include "asio/read.hpp" #include "asio/write.hpp" #include static bool is_isolated() { const auto mode = 
gu::gu_asio_node_isolation_mode.load(std::memory_order_relaxed); switch (mode) { case WSREP_NODE_ISOLATION_NOT_ISOLATED: return false; case WSREP_NODE_ISOLATION_ISOLATED: return true; case WSREP_NODE_ISOLATION_FORCE_DISCONNECT: gu_throw_fatal << "Network reactor termination was requested by " "WSREP_NODE_ISOLATION_FORCE_DISCONNECT"; } return true; /* to keep compiler happy */ } gu::AsioStreamReact::AsioStreamReact( AsioIoService& io_service, const std::string& scheme, const std::shared_ptr& engine) : io_service_(io_service) , socket_(io_service_.impl().native()) , scheme_(scheme) , engine_(engine) , local_addr_() , remote_addr_() , connected_() , handshake_complete_() , non_blocking_(false) , in_progress_() , read_context_() , write_context_() { } gu::AsioStreamReact::~AsioStreamReact() { shutdown(); close(); } void gu::AsioStreamReact::open(const gu::URI& uri) try { auto resolve_result(resolve_tcp(io_service_.impl().native(), uri)); socket_.open(resolve_result->endpoint().protocol()); set_fd_options(socket_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error opening stream socket " << uri; } bool gu::AsioStreamReact::is_open() const try { return socket_.is_open(); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error checking if socket is open "; return false; } void gu::AsioStreamReact::shutdown() { if (not (in_progress_ & shutdown_in_progress) && engine_) { engine_->shutdown(); in_progress_ |= shutdown_in_progress; } } void gu::AsioStreamReact::close() try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::close"); if (not is_open()) { GU_ASIO_DEBUG(debug_print() << "Socket not open on close"); } socket_.close(); } // Catch all the possible exceptions here, not only asio ones. 
catch (const std::exception& e) { log_info << "Closing socket failed: " << e.what(); } void gu::AsioStreamReact::bind(const gu::AsioIpAddress& addr) try { ::bind(socket_, addr); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error in binding"; } void gu::AsioStreamReact::async_connect( const gu::URI& uri, const std::shared_ptr& handler) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::connect: " << uri); auto resolve_result(resolve_tcp(io_service_.impl().native(), uri)); if (not socket_.is_open()) { socket_.open(resolve_result->endpoint().protocol()); } connected_ = true; socket_.async_connect(*resolve_result, boost::bind(&AsioStreamReact::connect_handler, shared_from_this(), handler, asio::placeholders::error)); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error connecting "; } void gu::AsioStreamReact::async_write( const std::array& bufs, const std::shared_ptr& handler) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::async_write: buf pointer " << "ops in progress " << in_progress_); if (write_context_.buf().size()) { gu_throw_error(EBUSY) << "Trying to write into busy socket"; } if (not handshake_complete_) { gu_throw_error(EBUSY) << "Handshake in progress"; } write_context_ = WriteContext(bufs); start_async_write(&AsioStreamReact::write_handler, handler); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Async write failed '" << e.what(); } void gu::AsioStreamReact::async_read( const AsioMutableBuffer& buf, const std::shared_ptr& handler) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::async_read: buf pointer: " << buf.data() << " buf size: " << buf.size()); assert(not read_context_.buf().data()); if (not handshake_complete_) { gu_throw_error(EBUSY) << "Handshake in progress"; } assert(handshake_complete_); read_context_ = ReadContext(buf); start_async_read(&AsioStreamReact::read_handler, handler); } catch (const 
asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Async read failed '" << e.what(); } static void throw_sync_op_error(const gu::AsioStreamEngine& engine, const char* prefix) { auto last_error(engine.last_error()); if (last_error.is_system()) gu_throw_system_error(last_error.value()) << prefix << ": " << last_error.message(); else gu_throw_error(EPROTO) << prefix << ": " << last_error.message(); } void gu::AsioStreamReact::connect(const gu::URI& uri) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::connect: " << uri); auto resolve_result(resolve_tcp(io_service_.impl().native(), uri)); if (not socket_.is_open()) { socket_.open(resolve_result->endpoint().protocol()); set_fd_options(socket_); } socket_.connect(resolve_result->endpoint()); connected_ = true; prepare_engine(false); auto result(engine_->client_handshake()); switch (result) { case AsioStreamEngine::success: return; case AsioStreamEngine::want_read: case AsioStreamEngine::want_write: case AsioStreamEngine::eof: gu_throw_error(EPROTO) << "Got unexpected return from client handshake: " << result; break; default: throw_sync_op_error(*engine_, "Client handshake failed"); } } catch (asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to connect '" << uri << "': " << e.what(); } size_t gu::AsioStreamReact::write(const AsioConstBuffer& buf) try { assert(buf.size() > 0); set_non_blocking(false); auto write_result(engine_->write(buf.data(), buf.size())); switch (write_result.status) { case AsioStreamEngine::success: assert(write_result.bytes_transferred == buf.size()); return write_result.bytes_transferred; case AsioStreamEngine::want_read: case AsioStreamEngine::want_write: case AsioStreamEngine::eof: gu_throw_error(EPROTO) << "Got unexpected return from write: " << write_result.status; return 0; default: throw_sync_op_error(*engine_, "Failed to write"); return 0; // Keep compiler happy } } catch (const asio::system_error& e) { 
gu_throw_system_error(e.code().value()) << "Failed to write: " << e.what(); } size_t gu::AsioStreamReact::read(const AsioMutableBuffer& buf) try { set_non_blocking(false); size_t total_transferred(0); do { auto read_result( engine_->read( static_cast(buf.data()) + total_transferred, buf.size() - total_transferred)); switch (read_result.status) { case AsioStreamEngine::success: total_transferred += read_result.bytes_transferred; break; case AsioStreamEngine::eof: return 0; case AsioStreamEngine::want_read: case AsioStreamEngine::want_write: gu_throw_error(EPROTO) << "Got unexpected return from read: " << read_result.status; return 0; default: throw_sync_op_error(*engine_, "Failed to read"); return 0; } } while (total_transferred != buf.size()); return total_transferred; } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to read: " << e.what(); } std::string gu::AsioStreamReact::local_addr() const { return local_addr_; } std::string gu::AsioStreamReact::remote_addr() const { return remote_addr_; } void gu::AsioStreamReact::set_receive_buffer_size(size_t size) try { assert(not connected_); ::set_receive_buffer_size(socket_, size); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error setting receive buffer size"; } size_t gu::AsioStreamReact::get_receive_buffer_size() try { return ::get_receive_buffer_size(socket_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error getting receive buffer size "; } void gu::AsioStreamReact::set_send_buffer_size(size_t size) try { assert(not connected_); ::set_send_buffer_size(socket_, size); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error setting send buffer size"; } size_t gu::AsioStreamReact::get_send_buffer_size() try { return ::get_send_buffer_size(socket_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error getting send buffer size"; } 
struct tcp_info gu::AsioStreamReact::get_tcp_info() try { return ::get_tcp_info(socket_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error getting TCP info"; } void gu::AsioStreamReact::complete_client_handshake( const std::shared_ptr& handler, AsioStreamEngine::op_status result) try { GU_ASIO_DEBUG(debug_print() << " complete_client_handshake " << result); switch (result) { case AsioStreamEngine::success: handshake_complete_ = true; handler->connect_handler(*this, AsioErrorCode()); break; case AsioStreamEngine::want_read: start_async_read(&AsioStreamReact::client_handshake_handler, handler); break; case AsioStreamEngine::want_write: start_async_write(&AsioStreamReact::client_handshake_handler, handler); break; case AsioStreamEngine::eof: handler->connect_handler(*this, AsioErrorCode(asio::error::misc_errors::eof, gu_asio_misc_category)); break; case AsioStreamEngine::error: handler->connect_handler(*this, engine_->last_error()); break; default: handler->connect_handler(*this, AsioErrorCode(EPROTO)); break; assert(0); } } catch (const asio::system_error& e) { handler->connect_handler(*this, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::connect_handler( const std::shared_ptr& handler, const asio::error_code& ec) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::connect_handler: " << ec); if (ec) { handler->connect_handler(*this, AsioErrorCode(ec.value(), ec.category())); close(); return; } set_socket_options(socket_); prepare_engine(true); assign_addresses(); GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::connect_handler: init handshake"); auto result(engine_->client_handshake()); // Perform wait to complete IO operation. 
socket_.async_wait( socket_.wait_write, [handler, result, this](const asio::error_code& ec) { if (ec) { handler->connect_handler(*this, AsioErrorCode(ec.value(), ec.category())); close(); return; } complete_client_handshake(handler, result); }); } catch (const asio::system_error& e) { handler->connect_handler(*this, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::client_handshake_handler( const std::shared_ptr& handler, const asio::error_code& ec) try { // During handshake there is only read or write in progress // at the time. Therefore safe to clear both flags. in_progress_ &= ~(read_in_progress | write_in_progress); GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::client_handshake_handler: " << ec); if (ec) { handler->connect_handler( *this, AsioErrorCode(ec.value(), ec.category())); close(); return; } if (is_isolated()) { handle_isolation_error(handler); return; } auto result(engine_->client_handshake()); GU_ASIO_DEBUG(debug_print() << "AsioStreamReact::client_handshake_handler: result from engine: " << result); switch (result) { case AsioStreamEngine::success: handshake_complete_ = true; handler->connect_handler( *this, AsioErrorCode(ec.value(), ec.category())); break; case AsioStreamEngine::want_read: start_async_read(&AsioStreamReact::client_handshake_handler, handler); break; case AsioStreamEngine::want_write: start_async_write(&AsioStreamReact::client_handshake_handler, handler); break; case AsioStreamEngine::eof: handler->connect_handler(*this, AsioErrorCode(asio::error::misc_errors::eof, gu_asio_misc_category)); break; case AsioStreamEngine::error: handler->connect_handler(*this, engine_->last_error()); break; default: assert(0); handler->connect_handler(*this, AsioErrorCode(EPROTO)); break; } } catch (const asio::system_error& e) { handler->connect_handler(*this, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::complete_server_handshake( const std::shared_ptr& handler, AsioStreamEngine::op_status result) try { 
GU_ASIO_DEBUG(debug_print() << "AsioStreamReact::server_handshake_handler: " << "result from engine: " << result); switch (result) { case AsioStreamEngine::success: handshake_complete_ = true; handler->connect_handler(*this, AsioErrorCode()); break; case AsioStreamEngine::want_read: start_async_read(&AsioStreamReact::server_handshake_handler, handler); break; case AsioStreamEngine::want_write: start_async_write(&AsioStreamReact::server_handshake_handler, handler); break; case AsioStreamEngine::error: handler->connect_handler(*this, engine_->last_error()); break; case AsioStreamEngine::eof: handler->connect_handler(*this, AsioErrorCode::make_eof()); break; } } catch (const asio::system_error& e) { handler->connect_handler(*this, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::server_handshake_handler( const std::shared_ptr& handler, const asio::error_code& ec) try { // During handshake there is only read or write in progress // at the time. Therefore safe to clear both flags. 
in_progress_ &= ~(read_in_progress | write_in_progress); if (ec) { handler->connect_handler(*this, AsioErrorCode(ec.value(), ec.category())); return; } if (is_isolated()) { throw asio::system_error(asio::error::basic_errors::operation_aborted); } auto result = engine_->server_handshake(); auto self = shared_from_this(); // Clear possible write IO in_progress_ &= write_in_progress; socket_.async_wait(socket_.wait_write, [handler, result, self](const asio::error_code& ec) { self->complete_server_handshake(handler, result); }); } catch (const asio::system_error& e) { handler->connect_handler(*this, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::read_handler( const std::shared_ptr& handler, const asio::error_code& ec) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: " << ec); in_progress_ &= ~read_in_progress; if (in_progress_ & shutdown_in_progress) return; if (ec) { handle_read_handler_error(handler, AsioErrorCode(ec.value(), ec.category())); return; } if (is_isolated()) { handle_isolation_error(handler); return; } const size_t left_to_read(read_context_.left_to_read()); assert(left_to_read <= read_context_.buf().size() - read_context_.bytes_transferred()); GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: left_to_read: " << left_to_read); auto read_result( engine_->read(reinterpret_cast(read_context_.buf().data()) + read_context_.bytes_transferred(), left_to_read)); GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: bytes_read: " << read_result.bytes_transferred); if (read_result.bytes_transferred) { complete_read_op(handler, read_result.bytes_transferred); } switch (read_result.status) { case AsioStreamEngine::success: // In case more reads were needed to transfer all data, the // read operation was started in complete_read_op(). 
break; case AsioStreamEngine::want_read: GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: " << "would block/want read"); start_async_read(&AsioStreamReact::read_handler, handler); break; case AsioStreamEngine::want_write: GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: want write"); start_async_write(&AsioStreamReact::read_handler, handler); break; case AsioStreamEngine::eof: GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: eof"); handle_read_handler_error( handler, AsioErrorCode(asio::error::misc_errors::eof, gu_asio_misc_category)); break; case AsioStreamEngine::error: GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::read_handler: Read error: " << ec.message() << " status " << read_result.status); handle_read_handler_error(handler, engine_->last_error()); break; } } catch (const asio::system_error& e) { handle_read_handler_error(handler, AsioErrorCode(e.code().value())); } void gu::AsioStreamReact::write_handler( const std::shared_ptr& handler, const asio::error_code& ec) try { GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::write_handler: " << ec); in_progress_ &= ~write_in_progress; if (in_progress_ & shutdown_in_progress) return; if (ec) { handle_write_handler_error(handler, AsioErrorCode(ec.value(), ec.category())); return; } if (is_isolated()) { handle_isolation_error(handler); return; } AsioStreamEngine::op_result write_result( engine_->write( write_context_.buf().data() + write_context_.bytes_transferred(), write_context_.buf().size() - write_context_.bytes_transferred())); if (write_result.bytes_transferred) { complete_write_op(handler, write_result.bytes_transferred); } switch (write_result.status) { case AsioStreamEngine::success: // In case more writes were needed to transfer all data, the // write operation was started in complete_read_op(). 
break; case AsioStreamEngine::want_write: start_async_write(&AsioStreamReact::write_handler, handler); break; case AsioStreamEngine::want_read: start_async_read(&AsioStreamReact::write_handler, handler); break; case AsioStreamEngine::eof: handle_write_handler_error( handler, AsioErrorCode(asio::error::misc_errors::eof, gu_asio_misc_category)); break; case AsioStreamEngine::error: GU_ASIO_DEBUG(debug_print() << " AsioStreamReact::write_handler: Write error: " << engine_->last_error()); handle_write_handler_error(handler, engine_->last_error()); break; } } catch (const asio::system_error& e) { handle_write_handler_error(handler, AsioErrorCode(e.code().value())); } // // Private // void gu::AsioStreamReact::assign_addresses() { local_addr_ = ::uri_string( engine_->scheme(), ::escape_addr(socket_.local_endpoint().address()), gu::to_string(socket_.local_endpoint().port())); remote_addr_ = ::uri_string( engine_->scheme(), ::escape_addr(socket_.remote_endpoint().address()), gu::to_string(socket_.remote_endpoint().port())); } void gu::AsioStreamReact::prepare_engine(bool non_blocking) { if (not engine_) { engine_ = AsioStreamEngine::make(io_service_, scheme_, native_socket_handle(socket_), non_blocking); } else { engine_->assign_fd(native_socket_handle(socket_)); } } template void gu::AsioStreamReact::start_async_read(Fn fn, FnArgs... fn_args) { if (in_progress_ & read_in_progress) { return; } set_non_blocking(true); socket_.async_wait(socket_.wait_read, boost::bind(fn, shared_from_this(), fn_args..., asio::placeholders::error)); ; in_progress_ |= read_in_progress; } template void gu::AsioStreamReact::start_async_write(Fn fn, FnArgs... 
fn_args) { if (in_progress_ & write_in_progress) { return; } set_non_blocking(true); socket_.async_wait(socket_.wait_write, boost::bind(fn, shared_from_this(), fn_args..., asio::placeholders::error)); in_progress_ |= write_in_progress; } void gu::AsioStreamReact::complete_read_op( const std::shared_ptr& handler, size_t bytes_transferred) { assert(bytes_transferred); read_context_.inc_bytes_transferred(bytes_transferred); const size_t read_completion( handler->read_completion_condition( *this, AsioErrorCode(), read_context_.bytes_transferred())); if (read_completion == 0) { std::size_t total_transferred(read_context_.bytes_transferred()); read_context_.reset(); handler->read_handler(*this, AsioErrorCode(), total_transferred); } else { // Refuse to read more than there is available space left // in read buffer. read_context_.read_completion( std::min(read_completion, read_context_.buf().size() - read_context_.bytes_transferred())); start_async_read(&AsioStreamReact::read_handler, handler); } } void gu::AsioStreamReact::complete_write_op( const std::shared_ptr& handler, size_t bytes_transferred) { assert(bytes_transferred); write_context_.inc_bytes_transferred(bytes_transferred); if (write_context_.bytes_transferred() == write_context_.buf().size()) { std::size_t total_transferred(write_context_.bytes_transferred()); write_context_.reset(); handler->write_handler(*this, AsioErrorCode(), total_transferred); } else { start_async_write(&AsioStreamReact::write_handler, handler); } } void gu::AsioStreamReact::handle_read_handler_error( const std::shared_ptr& handler, const AsioErrorCode& ec) { shutdown(); handler->read_completion_condition( *this, ec, read_context_.bytes_transferred()); handler->read_handler( *this, ec, read_context_.bytes_transferred()); close(); } void gu::AsioStreamReact::handle_write_handler_error( const std::shared_ptr& handler, const AsioErrorCode& ec) { shutdown(); handler->write_handler( *this, ec, write_context_.bytes_transferred()); close(); } 
void gu::AsioStreamReact::handle_isolation_error( const std::shared_ptr& handler) { shutdown(); handler->write_handler( *this, AsioErrorCode(asio::error::basic_errors::operation_aborted, asio::error::get_system_category()), 0); close(); } void gu::AsioStreamReact::set_non_blocking(bool val) { // Socket which is once set to non-blocking mode should never // be switched back to blocking. This is to detect mixed use // of sync and async operations, which are undefined behavior. assert(not non_blocking_ || val); if (non_blocking_ != val) { socket_.non_blocking(val); socket_.native_non_blocking(val); non_blocking_ = val; } } std::string gu::AsioStreamReact::debug_print() const { std::ostringstream oss; oss << this << ": " << scheme_ << " l: " << local_addr_ << " r: " << remote_addr_ << " c: " << connected_ << " nb: " << non_blocking_ << " s: " << engine_.get(); return oss.str(); } // // Acceptor // gu::AsioAcceptorReact::AsioAcceptorReact(AsioIoService& io_service, const std::string& scheme) : io_service_(io_service) , acceptor_(io_service_.impl().native()) , scheme_(scheme) , listening_() , engine_() { } void gu::AsioAcceptorReact::open(const gu::URI& uri) try { auto resolve_result(resolve_tcp(io_service_.impl().native(), uri)); acceptor_.open(resolve_result->endpoint().protocol()); set_fd_options(acceptor_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to open acceptor: " << e.what(); } bool gu::AsioAcceptorReact::is_open() const { return acceptor_.is_open(); } void gu::AsioAcceptorReact::listen(const gu::URI& uri) try { auto resolve_result(resolve_tcp(io_service_.impl().native(), uri)); if (not acceptor_.is_open()) { acceptor_.open(resolve_result->endpoint().protocol()); set_fd_options(acceptor_); } acceptor_.set_option(asio::ip::tcp::socket::reuse_address(true)); acceptor_.bind(*resolve_result); acceptor_.listen(); listening_ = true; } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << 
"Failed to listen: " << e.what(); } void gu::AsioAcceptorReact::close() try { if (acceptor_.is_open()) { acceptor_.close(); } listening_ = false; } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to close acceptor: " << e.what(); } void gu::AsioAcceptorReact::async_accept( const std::shared_ptr& acceptor_handler, const std::shared_ptr& handler, const std::shared_ptr& engine) try { GU_ASIO_DEBUG(this << " AsioAcceptorReact::async_accept: " << listen_addr()); auto new_socket(std::make_shared( io_service_, scheme_, engine)); auto self = shared_from_this(); acceptor_.async_accept( new_socket->socket_, [self, new_socket, acceptor_handler, handler](const asio::error_code& ec) { self->accept_handler(new_socket, acceptor_handler, handler, ec); }); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to accept: " << e.what(); } std::shared_ptr gu::AsioAcceptorReact::accept() try { auto socket(std::make_shared(io_service_, scheme_, nullptr)); acceptor_.accept(socket->socket_); set_socket_options(socket->socket_); socket->prepare_engine(false); socket->assign_addresses(); std::string remote_ip = gu::unescape_addr(::escape_addr(socket->socket_.remote_endpoint().address())); auto connection_allowed(gu::allowlist_value_check(WSREP_ALLOWLIST_KEY_IP, remote_ip)); if (connection_allowed == false) { log_warn << "Connection not allowed, IP not found in allowlist."; throw_sync_op_error(*socket->engine_, "Connection not allowed, IP not found in allowlist."); return std::shared_ptr(); } auto result(socket->engine_->server_handshake()); switch (result) { case AsioStreamEngine::success: return socket; case AsioStreamEngine::want_read: case AsioStreamEngine::want_write: case AsioStreamEngine::eof: gu_throw_error(EPROTO) << "Got unexpected return from server handshake: " << result; return std::shared_ptr(); case AsioStreamEngine::error: throw_sync_op_error(*socket->engine_, "Handshake failed"); return 
std::shared_ptr(); // Keep compiler happy } return socket; } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to accept: " << e.what(); } std::string gu::AsioAcceptorReact::listen_addr() const try { return uri_string( scheme_, ::escape_addr(acceptor_.local_endpoint().address()), gu::to_string(acceptor_.local_endpoint().port())); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "failed to read listen addr " << "', asio error '" << e.what() << "'"; } unsigned short gu::AsioAcceptorReact::listen_port() const try { return acceptor_.local_endpoint().port(); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "failed to read listen port " << "', asio error '" << e.what() << "'"; } void gu::AsioAcceptorReact::set_receive_buffer_size(size_t size) try { assert(not listening_); ::set_receive_buffer_size(acceptor_, size); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error setting receive buffer size"; } size_t gu::AsioAcceptorReact::get_receive_buffer_size() try { return ::get_receive_buffer_size(acceptor_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error getting receive buffer size"; return 0; } void gu::AsioAcceptorReact::set_send_buffer_size(size_t size) try { assert(not listening_); ::set_send_buffer_size(acceptor_, size); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error setting send buffer size"; } size_t gu::AsioAcceptorReact::get_send_buffer_size() try { return ::get_send_buffer_size(acceptor_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error getting send buffer size"; return 0; } void gu::AsioAcceptorReact::accept_handler( const std::shared_ptr& socket, const std::shared_ptr& acceptor_handler, const std::shared_ptr& handler, const asio::error_code& ec) try { GU_ASIO_DEBUG(this << " 
AsioAcceptorReact::accept_handler(): " << ec); if (ec) { acceptor_handler->accept_handler( *this, socket, AsioErrorCode(ec.value(), ec.category())); return; } set_socket_options(socket->socket_); socket->set_non_blocking(true); socket->prepare_engine(true); socket->assign_addresses(); std::string remote_ip = gu::unescape_addr(::escape_addr(socket->socket_.remote_endpoint().address())); bool connection_allowed(gu::allowlist_value_check(WSREP_ALLOWLIST_KEY_IP, remote_ip)); if (connection_allowed == false) { log_warn << "Connection not allowed, IP " << remote_ip << " not found in allowlist."; acceptor_handler->accept_handler(*this, socket, AsioErrorCode::make_eof()); return; } socket->connected_ = true; // Necessary async reads/writes/waits are done within // server_handshake_handler(). acceptor_handler->accept_handler(*this, socket, AsioErrorCode()); socket->server_handshake_handler(handler, ec); } catch(const asio::system_error& e) { acceptor_handler->accept_handler(*this, socket, AsioErrorCode(e.code().value())); } galera-4-26.4.25/galerautils/src/gu_mutex.hpp000644 000164 177776 00000007654 15107057155 022201 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy * */ #ifndef __GU_MUTEX__ #define __GU_MUTEX__ #include "gu_macros.h" #include "gu_threads.h" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_abort.h" #include #include #include #include // abort() #if !defined(GU_DEBUG_MUTEX) && !defined(NDEBUG) #define GU_MUTEX_DEBUG #endif namespace gu { class Mutex { public: Mutex () : value_() #ifdef GU_MUTEX_DEBUG , owned_() , locked_() #endif /* GU_MUTEX_DEBUG */ { gu_mutex_init (&value_, NULL); // always succeeds } ~Mutex () { int const err(gu_mutex_destroy (&value_)); if (gu_unlikely(err != 0)) { log_fatal << "Mutex destroy failed: " << err << " (" << strerror(err) << "), Aborting."; gu_abort(); } } void lock() const { int const err(gu_mutex_lock(&value_)); if (gu_likely(0 == err)) { #ifdef GU_MUTEX_DEBUG locked_ = true; 
owned_ = gu_thread_self(); #endif /* GU_MUTEX_DEBUG */ } else { log_fatal << "Mutex lock failed: " << err << " (" << strerror(err) << "), Aborting."; // Do not throw exception here because it will be // uncaught in many destructors. gu_abort(); } } void unlock() const { // this is not atomic, but the presumption is that unlock() // should never be called before preceding lock() completes #if defined(GU_DEBUG_MUTEX) || defined(GU_MUTEX_DEBUG) assert(locked()); assert(owned()); #if defined(GU_MUTEX_DEBUG) locked_ = false; #endif /* GU_MUTEX_DEBUG */ disown(); #endif /* GU_DEBUG_MUTEX */ int const err(gu_mutex_unlock(&value_)); if (gu_unlikely(0 != err)) { log_fatal << "Mutex unlock failed: " << err << " (" << strerror(err) << "), Aborting."; gu_abort(); } } gu_mutex_t& impl() const { return value_; } #if defined(GU_DEBUG_MUTEX) bool locked() const { return gu_mutex_locked(&value_); } bool owned() const { return locked() && gu_mutex_owned(&value_); } void disown() const { gu_mutex_disown(&value); } #elif defined(GU_MUTEX_DEBUG) bool locked() const { return locked_; } bool owned() const { return locked() && gu_thread_equal(owned_,gu_thread_self()); } void disown() const { memset(&owned_, 0, sizeof(owned_)); } #endif /* GU_DEBUG_MUTEX */ protected: gu_mutex_t mutable value_; #ifdef GU_MUTEX_DEBUG gu_thread_t mutable owned_; bool mutable locked_; #endif /* GU_MUTEX_DEBUG */ private: Mutex (const Mutex&); Mutex& operator= (const Mutex&); friend class Lock; }; class RecursiveMutex { public: RecursiveMutex() : mutex_() { pthread_mutexattr_t mattr; pthread_mutexattr_init(&mattr); pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&mutex_, &mattr); pthread_mutexattr_destroy(&mattr); } ~RecursiveMutex() { pthread_mutex_destroy(&mutex_); } void lock() { if (pthread_mutex_lock(&mutex_)) gu_throw_fatal; } void unlock() { if (pthread_mutex_unlock(&mutex_)) gu_throw_fatal; } private: RecursiveMutex(const RecursiveMutex&); void operator=(const 
RecursiveMutex&); pthread_mutex_t mutex_; }; } #endif /* __GU_MUTEX__ */ galera-4-26.4.25/galerautils/src/gu_signals.hpp000644 000164 177776 00000001677 15107057155 022476 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2021 Codership Oy // #ifndef GU_SIGNALS_HPP #define GU_SIGNALS_HPP #include namespace gu { class Signals { public: enum SignalType { S_CONFIG_RELOAD_CERTIFICATE, }; typedef boost::signals2::signal signal_t; typedef signal_t::slot_type slot_type; typedef boost::signals2::connection signal_connection; static Signals& Instance(); signal_connection connect(const slot_type &subscriber); void signal(const SignalType&); Signals(Signals const&) = delete; Signals(Signals&&) = delete; Signals& operator=(Signals const&) = delete; Signals& operator=(Signals &&) = delete; private: Signals() : signal_() { }; ~Signals() = default; signal_t signal_; }; } // namespace gu #endif // GU_SIGNALS_HPP galera-4-26.4.25/galerautils/src/gu_config.h000644 000164 177776 00000003371 15107057155 021734 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2014 Codership Oy /** * @file * C-interface for configuration management * * $Id$ */ #ifndef _gu_config_h_ #define _gu_config_h_ #include #include #include // for ssize_t #ifdef __cplusplus extern "C" { #endif typedef struct gu_config gu_config_t; gu_config_t* gu_config_create (void); void gu_config_destroy (gu_config_t* cnf); bool gu_config_has (gu_config_t* cnf, const char* key); bool gu_config_is_set (gu_config_t* cnf, const char* key); /* before setting a parameter, it must be added to a known parameter list */ int gu_config_add (gu_config_t* cnf, const char* key, const char* val /*can be NULL*/, int flags); /* Getters/setters return 0 on success, 1 when key not set/not found, * negative error code in case of other errors (conversion failed and such) */ int gu_config_get_string (gu_config_t* cnf, const char* key, const char** val); int gu_config_get_int64 (gu_config_t* cnf, const char* key, int64_t* val); int 
gu_config_get_double (gu_config_t* cnf, const char* key, double* val); int gu_config_get_ptr (gu_config_t* cnf, const char* key, void** val); int gu_config_get_bool (gu_config_t* cnf, const char* key, bool* val); void gu_config_set_string (gu_config_t* cnf, const char* key, const char* val); void gu_config_set_int64 (gu_config_t* cnf, const char* key, int64_t val); void gu_config_set_double (gu_config_t* cnf, const char* key, double val); void gu_config_set_ptr (gu_config_t* cnf, const char* key, const void* val); void gu_config_set_bool (gu_config_t* cnf, const char* key, bool val); ssize_t gu_config_print (gu_config_t* cnf, char* buf, ssize_t buf_len); #ifdef __cplusplus } #endif #endif /* _gu_config_h_ */ galera-4-26.4.25/galerautils/src/gu_lock_step.c000644 000164 177776 00000006711 15107057155 022446 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. * */ #include // abort() #include // error codes #include #include #include // strerror() #include "gu_log.h" #include "gu_assert.h" #include "gu_time.h" #include "gu_lock_step.h" void gu_lock_step_init (gu_lock_step_t* ls) { gu_mutex_init (&ls->mtx, NULL); gu_cond_init (&ls->cond, NULL); ls->wait = 0; ls->cont = 0; ls->enabled = false; } void gu_lock_step_destroy (gu_lock_step_t* ls) { // this is not really fool-proof, but that's not for fools to use while (gu_lock_step_cont(ls, 10)) {}; gu_cond_destroy (&ls->cond); gu_mutex_destroy (&ls->mtx); assert (0 == ls->wait); } void gu_lock_step_enable (gu_lock_step_t* ls, bool enable) { if (!gu_mutex_lock (&ls->mtx)) { ls->enabled = enable; gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } } void gu_lock_step_wait (gu_lock_step_t* ls) { if (!gu_mutex_lock (&ls->mtx)) { if (ls->enabled) { if (!ls->cont) { // wait for signal ls->wait++; gu_cond_wait (&ls->cond, &ls->mtx); } else { // signal to signaller gu_cond_signal 
(&ls->cond); ls->cont--; } } gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } } /* returns how many waiters are there */ long gu_lock_step_cont (gu_lock_step_t* ls, long timeout_ms) { long ret = -1; if (!gu_mutex_lock (&ls->mtx)) { if (ls->enabled) { if (ls->wait > 0) { // somebody's waiting ret = ls->wait; gu_cond_signal (&ls->cond); ls->wait--; } else if (timeout_ms > 0) { // wait for waiter // what a royal mess with times! Why timeval exists? struct timeval now; struct timespec timeout; long err; gettimeofday (&now, NULL); gu_timeval_add (&now, timeout_ms * 0.001); timeout.tv_sec = now.tv_sec; timeout.tv_nsec = now.tv_usec * 1000; ls->cont++; do { err = gu_cond_timedwait (&ls->cond, &ls->mtx, &timeout); } while (EINTR == err); assert ((0 == err) || (ETIMEDOUT == err && ls->cont > 0)); ret = (0 == err); // successful rendezvous with waiter ls->cont -= (0 != err); // self-decrement in case of error } else if (timeout_ms < 0) { // wait forever long err; ls->cont++; err = gu_cond_wait (&ls->cond, &ls->mtx); ret = (0 == err); // successful rendezvous with waiter ls->cont -= (0 != err); // self-decrement in case of error } else { // don't wait ret = 0; } } gu_mutex_unlock (&ls->mtx); } else { gu_fatal ("Mutex lock failed"); assert (0); abort(); } return ret; } galera-4-26.4.25/galerautils/src/gu_utils.h000644 000164 177776 00000001573 15107057155 021631 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010 Codership Oy /** * @file Miscellaneous utility functions * * $Id$ */ #ifndef _gu_utils_h_ #define _gu_utils_h_ #include #ifdef __cplusplus extern "C" { #endif /* * The string conversion functions below are slighly customized * versions of standard libc functions designed to understand 'on'/'off' and * K/M/G size modifiers and the like. 
* * They return pointer to the next character after conversion: * - if (ret == str) no conversion was made * - if (ret[0] == '\0') whole string was converted */ extern const char* gu_str2ll (const char* str, long long* ll); extern const char* gu_str2dbl (const char* str, double* dbl); extern const char* gu_str2bool (const char* str, bool* b); extern const char* gu_str2ptr (const char* str, void** ptr); #ifdef __cplusplus } #endif #endif /* _gu_utils_h_ */ galera-4-26.4.25/galerautils/src/gu_hexdump.hpp000644 000164 177776 00000002012 15107057155 022470 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file operator << for hexdumps. * * Usage: std::cout << gu::Hexdump(ptr, size) * * $Id$ */ #ifndef _GU_HEXDUMP_HPP_ #define _GU_HEXDUMP_HPP_ #include "gu_types.hpp" #include namespace gu { class Hexdump { public: Hexdump (const void* const buf, size_t const size, bool const alpha = false) : buf_ (static_cast(buf)), size_ (size), alpha_(alpha) {} std::ostream& to_stream (std::ostream& os) const; // according to clang C++98 wants copy ctor to be public for temporaries Hexdump (const Hexdump& h) : buf_(h.buf_), size_(h.size_), alpha_(h.alpha_) {} private: const byte_t* const buf_; size_t const size_; bool const alpha_; Hexdump& operator = (const Hexdump&); }; inline std::ostream& operator << (std::ostream& os, const Hexdump& h) { return h.to_stream(os); } } #endif /* _GU_HEXDUMP_HPP_ */ galera-4-26.4.25/galerautils/src/gu_fdesc.cpp000644 000164 177776 00000016355 15107057155 022114 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy * * $Id$ */ #include "gu_fdesc.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" extern "C" { #include "gu_limits.h" } #if !defined(_XOPEN_SOURCE) && !defined(__APPLE__) #define _XOPEN_SOURCE 600 #endif #include #include #include #include #include #include #ifndef O_CLOEXEC // CentOS < 6.0 does not have it #define O_CLOEXEC 0 #endif #ifndef O_NOATIME #define O_NOATIME 0 #endif namespace 
gu { static int const OPEN_FLAGS = O_RDWR | O_NOATIME | O_CLOEXEC; static int const CREATE_FLAGS = OPEN_FLAGS | O_CREAT /*| O_TRUNC*/; /* respect user umask by allowing all bits by default */ static mode_t const CREATE_MODE = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ; FileDescriptor::FileDescriptor (const std::string& fname, bool const sync) : name_(fname), fd_ (open (name_.c_str(), OPEN_FLAGS)), size_(fd_ < 0 ? 0 : lseek (fd_, 0, SEEK_END)), sync_(sync) { constructor_common(); } static unsigned long long available_storage(const std::string& name, size_t size) { static size_t const reserve(1 << 20); // reserve 1M free space struct statvfs stat; int const err(statvfs(name.c_str(), &stat)); if (0 == err) { unsigned long long const free_size(stat.f_bavail * stat.f_bsize); if (reserve < free_size) { return free_size - reserve; } else { return 0; } } else { int const errn(errno); log_warn << "statvfs() failed on '" << name << "' partition: " << errn << " (" << strerror(errn) <<"). 
Proceeding anyway."; return std::numeric_limits::max(); } } FileDescriptor::FileDescriptor (const std::string& fname, size_t const size, bool const allocate, bool const sync) : name_(fname), fd_ (open (fname.c_str(), CREATE_FLAGS, CREATE_MODE)), size_(size), sync_(sync) { constructor_common(); off_t const current_size(lseek (fd_, 0, SEEK_END)); if (current_size < size_) { unsigned long long const available(available_storage(name_, size_)); if (size_t(size_) > available) { ::close(fd_); ::unlink(name_.c_str()); gu_throw_error(ENOSPC) << "Requested size " << size_ << " for '" << name_ << "' exceeds available storage space " << available; } if (allocate) { // reserve space that hasn't been reserved prealloc (current_size); } else { // reserve size or bus error follows mmap() write_byte (size_ - 1); } } else if (current_size > size_) { log_debug << "Truncating '" << name_<< "' to " << size_<< " bytes."; if (ftruncate(fd_, size_)) { gu_throw_system_error(errno) << "Failed to truncate '" << name_ << "' to " << size_ << " bytes."; } } else { log_debug << "Reusing existing '" << name_ << "'."; } } void FileDescriptor::constructor_common() { if (fd_ < 0) { gu_throw_system_error(errno) << "Failed to open file '" + name_ + '\''; } #if !defined(__APPLE__) /* Darwin does not have posix_fadvise */ /* benefits are questionable int err(posix_fadvise (value, 0, size, POSIX_FADV_SEQUENTIAL)); if (err != 0) { log_warn << "Failed to set POSIX_FADV_SEQUENTIAL on " << name << ": " << err << " (" << strerror(err) << ")"; } */ #endif log_debug << "Opened file '" << name_ << "', size: " << size_; log_debug << "File descriptor: " << fd_; } FileDescriptor::~FileDescriptor () { if (sync_) { try { sync(); } catch (Exception& e) { log_error << e.what(); } } if (close(fd_) != 0) { int const err(errno); log_error << "Failed to close file '" << name_ << "': " << err << " (" << strerror(err) << '\''; } else { log_debug << "Closed file '" << name_ << "'"; } } void FileDescriptor::sync () const { 
log_debug << "Flushing file '" << name_ << "'"; if (fsync (fd_) < 0) { gu_throw_system_error(errno) << "fsync() failed on '" + name_ + '\''; } log_debug << "Flushed file '" << name_ << "'"; } bool FileDescriptor::write_byte (off_t offset) { byte_t const byte (0); if (lseek (fd_, offset, SEEK_SET) != offset) gu_throw_system_error(errno) << "lseek() failed on '" << name_ << '\''; if (write (fd_, &byte, sizeof(byte)) != sizeof(byte)) gu_throw_system_error(errno) << "write() failed on '" << name_ << '\''; return true; } /*! prealloc() fallback */ void FileDescriptor::write_file (off_t const start) { // last byte of the start page off_t offset = (start / GU_PAGE_SIZE + 1) * GU_PAGE_SIZE - 1; log_info << "Preallocating " << (size_ - start) << '/' << size_ << " bytes in '" << name_ << "'..."; while (offset < size_ && write_byte (offset)) { offset += GU_PAGE_SIZE; } if (offset >= size_ && write_byte (size_ - 1)) { sync(); return; } gu_throw_system_error (errno) << "File preallocation failed"; } void FileDescriptor::prealloc(off_t const start) { if (start < 0) { log_warn << "Offset is negative in '" << name_ << "'"; return; } off_t const diff (size_ - start); if (diff < 0) { log_warn << "Offset is greater than the file size in '" << name_ << "'"; return; } log_debug << "Preallocating " << diff << '/' << size_ << " bytes in '" << name_ << "'..."; #if defined(__APPLE__) if (-1 == fcntl (fd_, F_SETSIZE, size_) && -1 == ftruncate (fd_, size_)) { #else int const ret = posix_fallocate (fd_, start, diff); if (0 != ret) { errno = ret; #endif if ((EINVAL == errno || ENOSYS == errno)) { // FS does not support the operation, try physical write write_file (start); } else { gu_throw_system_error (errno) << "File preallocation failed"; } } } } galera-4-26.4.25/galerautils/src/gu_types.hpp000644 000164 177776 00000000424 15107057155 022167 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file Location of some "standard" types definitions * * $Id$ */ #ifndef 
_GU_TYPES_HPP_ #define _GU_TYPES_HPP_ #include "gu_types.h" namespace gu { typedef gu_byte_t byte_t; } #endif /* _GU_TYPES_HPP_ */ galera-4-26.4.25/galerautils/src/gu_rand.h000644 000164 177776 00000001243 15107057155 021407 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file routines to generate "random" seeds for RNGs by collecting some easy * entropy. * * gu_rand_seed_long() goes for srand48() * * gu_rand_seed_int() goes for srand() and rand_r() * * $Id$ */ #ifndef _gu_rand_h_ #define _gu_rand_h_ #include "gu_arch.h" #include // for pid_t extern long int gu_rand_seed_long (long long time, const void* heap_ptr, pid_t pid); #if GU_WORDSIZE == 32 extern unsigned int gu_rand_seed_int (long long time, const void* heap_ptr, pid_t pid); #else #define gu_rand_seed_int gu_rand_seed_long #endif /* GU_WORDSIZE */ #endif /* _gu_rand_h_ */ galera-4-26.4.25/galerautils/src/gu_backtrace.h000644 000164 177776 00000001411 15107057155 022377 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy #ifndef GU_BACKTRACE_H #define GU_BACKTRACE_H #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /*! * Get current backtrace. Return buffer will contain backtrace symbols if * available. NULL pointer is returned if getting backtrace is not supported * on current platform. Maximum number of frames in backtrace is passed * in size parameter, number of frames in returned backtrace is assigned * in size parameter on return. * * @param size Pointer to integer containing maximum number of frames * in backtrace * * @return Allocated array of strings containing backtrace symbols */ char** gu_backtrace(int* size); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* GU_BACKTRACE_H */ galera-4-26.4.25/galerautils/src/gu_to.c000644 000164 177776 00000027121 15107057155 021103 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! \file \brief Total order access "class" implementation. 
* Although gcs_repl() and gcs_recv() calls return sequence * numbers in total order, there are concurrency issues between * application threads and they can grab critical section * mutex out of order. Wherever total order access to critical * section is required, these functions can be used to do this. */ #include #include #include #include #include // abort() #include "gu_log.h" #include "gu_assert.h" #include "gu_mem.h" #include "gu_threads.h" #include "gu_to.h" #define TO_USE_SIGNAL 1 typedef enum { HOLDER = 0, //!< current TO holder WAIT, //!< actively waiting in the queue CANCELED, //!< Waiter has canceled its to request INTERRUPTED,//!< marked to be interrupted RELEASED, //!< has been released, free entry now } waiter_state_t; typedef struct { #ifdef TO_USE_SIGNAL gu_cond_t cond; #else pthread_mutex_t mtx; // have to use native pthread for double locking #endif waiter_state_t state; } to_waiter_t; struct gu_to { volatile gu_seqno_t seqno; size_t used; /* number of active waiters */ ssize_t qlen; size_t qmask; to_waiter_t* queue; gu_mutex_t lock; }; /** Returns pointer to the waiter with the given seqno */ static inline to_waiter_t* to_get_waiter (gu_to_t* to, gu_seqno_t seqno) { // Check for queue overflow. Tell application that it should wait. 
if (seqno >= to->seqno + to->qlen) { return NULL; } return (to->queue + (seqno & to->qmask)); } gu_to_t *gu_to_create (int len, gu_seqno_t seqno) { gu_to_t *ret; assert (seqno >= 0); if (len <= 0) { gu_error ("Negative length parameter: %d", len); return NULL; } ret = GU_CALLOC (1, gu_to_t); if (ret) { /* Make queue length a power of 2 */ ret->qlen = 1; while (ret->qlen < len) { // unsigned, can be bigger than any integer ret->qlen = ret->qlen << 1; } ret->qmask = ret->qlen - 1; ret->seqno = seqno; ret->queue = GU_CALLOC (ret->qlen, to_waiter_t); if (ret->queue) { ssize_t i; for (i = 0; i < ret->qlen; i++) { to_waiter_t *w = ret->queue + i; #ifdef TO_USE_SIGNAL gu_cond_init (&w->cond, NULL); #else pthread_mutex_init (&w->mtx, NULL); #endif w->state = RELEASED; } gu_mutex_init (&ret->lock, NULL); return ret; } gu_free (ret); } return NULL; } long gu_to_destroy (gu_to_t** to) { gu_to_t *t = *to; long ret; ssize_t i; gu_mutex_lock (&t->lock); if (t->used) { gu_mutex_unlock (&t->lock); return -EBUSY; } for (i = 0; i < t->qlen; i++) { to_waiter_t *w = t->queue + i; #ifdef TO_USE_SIGNAL if (gu_cond_destroy (&w->cond)) { // @todo: what if someone is waiting? gu_warn ("Failed to destroy condition %zd. Should not happen", i); } #else if (pthread_mutex_destroy (&w->mtx)) { // @todo: what if someone is waiting? gu_warn ("Failed to destroy mutex %zd. Should not happen", i); } #endif } t->qlen = 0; gu_mutex_unlock (&t->lock); /* What else can be done here? 
*/ ret = gu_mutex_destroy (&t->lock); if (ret) return -ret; // application can retry gu_free (t->queue); gu_free (t); *to = NULL; return 0; } long gu_to_grab (gu_to_t* to, gu_seqno_t seqno) { long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock(&to->lock))) { gu_fatal("Mutex lock failed (%ld): %s", err, strerror(err)); abort(); } if (seqno < to->seqno) { gu_mutex_unlock(&to->lock); return -ECANCELED; } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ switch (w->state) { case INTERRUPTED: w->state = RELEASED; err = -EINTR; break; case CANCELED: err = -ECANCELED; break; case RELEASED: if (seqno == to->seqno) { w->state = HOLDER; } else if (seqno < to->seqno) { gu_error("Trying to grab outdated seqno"); err = -ECANCELED; } else { /* seqno > to->seqno, wait for my turn */ w->state = WAIT; to->used++; #ifdef TO_USE_SIGNAL gu_cond_wait(&w->cond, &to->lock); #else pthread_mutex_lock (&w->mtx); pthread_mutex_unlock (&to->lock); pthread_mutex_lock (&w->mtx); // wait for unlock by other thread pthread_mutex_lock (&to->lock); pthread_mutex_unlock (&w->mtx); #endif to->used--; switch (w->state) { case WAIT:// should be most probable assert (seqno == to->seqno); w->state = HOLDER; break; case INTERRUPTED: w->state = RELEASED; err = -EINTR; break; case CANCELED: err = -ECANCELED; break; case RELEASED: /* this waiter has been cancelled */ assert(seqno < to->seqno); err = -ECANCELED; break; default: gu_fatal("Invalid cond wait exit state %d, seqno %" PRId64 "(%" PRId64 ")", w->state, seqno, to->seqno); abort(); } } break; default: gu_fatal("TO queue over wrap"); abort(); } gu_mutex_unlock(&to->lock); return err; } static inline long to_wake_waiter (to_waiter_t* w) { long err = 0; if (w->state == WAIT) { #ifdef TO_USE_SIGNAL err = gu_cond_signal (&w->cond); #else err = pthread_mutex_unlock (&w->mtx); #endif if (err) { gu_fatal ("gu_cond_signal failed: %ld", err); } } return err; } static 
inline void to_release_and_wake_next (gu_to_t* to, to_waiter_t* w) { w->state = RELEASED; /* * Iterate over CANCELED waiters and set states as RELEASED * We look for waiter in the head of queue, which guarantees that * to_get_waiter() will always return a valid waiter pointer */ for (to->seqno++; (w = to_get_waiter(to, to->seqno)) && w && w->state == CANCELED; to->seqno++) { w->state = RELEASED; } to_wake_waiter (w); } long gu_to_release (gu_to_t *to, gu_seqno_t seqno) { long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock(&to->lock))) { gu_fatal("Mutex lock failed (%ld): %s", err, strerror(err)); abort(); } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ if (seqno == to->seqno) { to_release_and_wake_next (to, w); } else if (seqno > to->seqno) { if (w->state != CANCELED) { gu_fatal("Illegal state in premature release: %d", w->state); abort(); } /* Leave state CANCELED so that real releaser can iterate */ } else { /* */ if (w->state != RELEASED) { gu_fatal("Outdated seqno and state not RELEASED: %d", w->state); abort(); } } gu_mutex_unlock(&to->lock); return err; } gu_seqno_t gu_to_seqno (gu_to_t* to) { return to->seqno - 1; } long gu_to_cancel (gu_to_t *to, gu_seqno_t seqno) { long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%ld): %s", err, strerror(err)); abort(); } // Check for queue overflow. This is totally unrecoverable. Abort. 
if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); abort(); } /* we have a valid waiter now */ if ((seqno > to->seqno) || (seqno == to->seqno && w->state != HOLDER)) { err = to_wake_waiter (w); w->state = CANCELED; } else if (seqno == to->seqno && w->state == HOLDER) { gu_warn("tried to cancel current TO holder, state %d seqno %" PRId64, w->state, seqno); err = -ECANCELED; } else { gu_warn("trying to cancel used seqno: state %d cancel seqno = %" PRId64 ", " "TO seqno = %" PRId64, w->state, seqno, to->seqno); err = -ECANCELED; } gu_mutex_unlock (&to->lock); return err; } long gu_to_self_cancel(gu_to_t *to, gu_seqno_t seqno) { long err = 0; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%ld): %s", err, strerror(err)); abort(); } if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ if (seqno > to->seqno) { // most probable case w->state = CANCELED; } else if (seqno == to->seqno) { // have to wake the next waiter as if we grabbed and now releasing TO to_release_and_wake_next (to, w); } else { // (seqno < to->seqno) // This waiter must have been canceled or even released by preceding // waiter. Do nothing. 
} gu_mutex_unlock(&to->lock); return err; } long gu_to_interrupt (gu_to_t *to, gu_seqno_t seqno) { long rcode = 0; long err; to_waiter_t *w; assert (seqno >= 0); if ((err = gu_mutex_lock (&to->lock))) { gu_fatal("Mutex lock failed (%ld): %s", err, strerror(err)); abort(); } if (seqno >= to->seqno) { if ((w = to_get_waiter (to, seqno)) == NULL) { gu_mutex_unlock(&to->lock); return -EAGAIN; } /* we have a valid waiter now */ switch (w->state) { case HOLDER: gu_debug("trying to interrupt in use seqno: seqno = %" PRId64 ", " "TO seqno = %" PRId64, seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; break; case CANCELED: gu_debug("trying to interrupt canceled seqno: seqno = %" PRId64 ", " "TO seqno = %" PRId64, seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; break; case WAIT: gu_debug("signaling to interrupt wait seqno: seqno = %" PRId64 ", " "TO seqno = %" PRId64, seqno, to->seqno); rcode = to_wake_waiter(w); /* fall through */ case RELEASED: w->state = INTERRUPTED; break; case INTERRUPTED: gu_debug("TO waiter interrupt already seqno: seqno = %" PRId64 ", " "TO seqno = %" PRId64, seqno, to->seqno); break; } } else { gu_debug("trying to interrupt used seqno: cancel seqno = %" PRId64 ", " "TO seqno = %" PRId64, seqno, to->seqno); /* gu_mutex_unlock (&to->lock); */ rcode = -ERANGE; } gu_mutex_unlock (&to->lock); return rcode; } galera-4-26.4.25/galerautils/src/gu_thread.cpp000644 000164 177776 00000005673 15107057155 022300 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2016 Codership Oy // #include "gu_thread.hpp" #include "gu_utils.hpp" #include "gu_string_utils.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include #include static std::string const SCHED_OTHER_STR ("other"); static std::string const SCHED_FIFO_STR ("fifo"); static std::string const SCHED_RR_STR ("rr"); static std::string const SCHED_UNKNOWN_STR("unknown"); static inline void parse_thread_schedparam(const std::string& param, int& policy, int& 
prio) { std::vector sv(gu::strsplit(param, ':')); if (sv.size() != 2) { gu_throw_error(EINVAL) << "Invalid schedparam: " << param; } if (sv[0] == SCHED_OTHER_STR) policy = SCHED_OTHER; else if (sv[0] == SCHED_FIFO_STR) policy = SCHED_FIFO; else if (sv[0] == SCHED_RR_STR) policy = SCHED_RR; else gu_throw_error(EINVAL) << "Invalid scheduling policy: " << sv[0]; prio = gu::from_string(sv[1]); } gu::ThreadSchedparam gu::ThreadSchedparam::system_default(SCHED_OTHER, 0); gu::ThreadSchedparam::ThreadSchedparam(const std::string& param) : policy_(), prio_ () { if (param == "") { *this = system_default; } else { parse_thread_schedparam(param, policy_, prio_); } } void gu::ThreadSchedparam::print(std::ostream& os) const { std::string policy_str; switch (policy()) { case SCHED_OTHER: policy_str = SCHED_OTHER_STR ; break; case SCHED_FIFO: policy_str = SCHED_FIFO_STR ; break; case SCHED_RR: policy_str = SCHED_RR_STR ; break; default: policy_str = SCHED_UNKNOWN_STR; break; } os << policy_str << ":" << prio(); } gu::ThreadSchedparam gu::thread_get_schedparam(pthread_t thd) { int policy; struct sched_param sp; int err; if ((err = pthread_getschedparam(thd, &policy, &sp)) != 0) { gu_throw_system_error(err) << "Failed to read thread schedparams"; } return ThreadSchedparam(policy, sp.sched_priority); } static bool schedparam_not_supported(false); void gu::thread_set_schedparam(pthread_t thd, const gu::ThreadSchedparam& sp) { if (schedparam_not_supported) return; #if defined(__sun__) struct sched_param spstr = { sp.prio(), { 0, } /* sched_pad array */}; #else struct sched_param spstr = { sp.prio() }; #endif int err; if ((err = pthread_setschedparam(thd, sp.policy(), &spstr)) != 0) { if (err == ENOSYS) { log_warn << "Function pthread_setschedparam() is not implemented " << "in this system. 
Future attempts to change scheduling " << "priority will be no-op"; schedparam_not_supported = true; } else { gu_throw_system_error(err) << "Failed to set thread schedparams " << sp; } } } galera-4-26.4.25/galerautils/src/gu_asio.cpp000644 000164 177776 00000057005 15107057155 021760 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2014-2020 Codership Oy // #include "gu_config.hpp" #include "gu_asio.hpp" #include "gu_datetime.hpp" #ifdef ASIO_HPP #error "asio.hpp is already included before gu_asio.hpp, can't customize asio.hpp" #endif // ASIO_HPP #include "asio/version.hpp" // ASIO does not interact well with kqueue before ASIO 1.10.5, see // https://readlist.com/lists/freebsd.org/freebsd-current/23/119264.html // http://think-async.com/Asio/asio-1.10.6/doc/asio/history.html#asio.history.asio_1_10_5 #if ASIO_VERSION < 101005 # define ASIO_DISABLE_KQUEUE #endif // ASIO_VERSION < 101005 #define GU_ASIO_IMPL #include "gu_asio_datagram.hpp" #include "gu_asio_debug.hpp" #include "gu_asio_error_category.hpp" #include "gu_asio_io_service_impl.hpp" #include "gu_asio_ip_address_impl.hpp" #include "gu_asio_stream_react.hpp" #include "gu_asio_utils.hpp" #include "gu_signals.hpp" #ifndef ASIO_HAS_BOOST_BIND #define ASIO_HAS_BOOST_BIND #endif // ASIO_HAS_BOOST_BIND #include "asio/placeholders.hpp" #ifdef GALERA_HAVE_SSL #include "asio/ssl.hpp" #endif // GALERA_HAVE_SSL #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) #include "asio/deadline_timer.hpp" #else #include "asio/steady_timer.hpp" #endif // #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) #include #include #include static wsrep_allowlist_service_v1_t* gu_allowlist_service(0); // // AsioIpAddress wrapper // // // IPv4 // gu::AsioIpAddressV4::AsioIpAddressV4() : impl_(std::unique_ptr(new Impl)) { } gu::AsioIpAddressV4::AsioIpAddressV4(const AsioIpAddressV4& other) : impl_(std::unique_ptr(new Impl(*other.impl_))) { } gu::AsioIpAddressV4& gu::AsioIpAddressV4::operator=(AsioIpAddressV4 other) { std::swap(this->impl_, 
other.impl_); return *this; } gu::AsioIpAddressV4::~AsioIpAddressV4() { } bool gu::AsioIpAddressV4::is_multicast() const { return impl_->native().is_multicast(); } gu::AsioIpAddressV4::Impl& gu::AsioIpAddressV4::impl() { return *impl_; } const gu::AsioIpAddressV4::Impl& gu::AsioIpAddressV4::impl() const { return *impl_; } // // IPv6 // gu::AsioIpAddressV6::AsioIpAddressV6() : impl_(std::unique_ptr(new Impl)) { } gu::AsioIpAddressV6::AsioIpAddressV6(const AsioIpAddressV6& other) : impl_(std::unique_ptr(new Impl(*other.impl_))) { } gu::AsioIpAddressV6& gu::AsioIpAddressV6::operator=(AsioIpAddressV6 other) { std::swap(this->impl_, other.impl_); return *this; } gu::AsioIpAddressV6::~AsioIpAddressV6() { } bool gu::AsioIpAddressV6::is_link_local() const { return impl_->native().is_link_local(); } unsigned long gu::AsioIpAddressV6::scope_id() const { return impl_->native().scope_id(); } bool gu::AsioIpAddressV6::is_multicast() const { return impl_->native().is_multicast(); } gu::AsioIpAddressV6::Impl& gu::AsioIpAddressV6::impl() { return *impl_; } const gu::AsioIpAddressV6::Impl& gu::AsioIpAddressV6::impl() const { return *impl_; } // // Generic Ip address wrapper // gu::AsioIpAddress::AsioIpAddress() : impl_(std::unique_ptr(new Impl)) { } gu::AsioIpAddress::AsioIpAddress(const AsioIpAddress& other) : impl_(std::unique_ptr(new Impl(*other.impl_))) { } gu::AsioIpAddress& gu::AsioIpAddress::operator=(AsioIpAddress other) { std::swap(this->impl_, other.impl_); return *this; } gu::AsioIpAddress::~AsioIpAddress() { } gu::AsioIpAddress::Impl& gu::AsioIpAddress::impl() { return *impl_; } const gu::AsioIpAddress::Impl& gu::AsioIpAddress::impl() const { return *impl_; } bool gu::AsioIpAddress::is_v4() const { return impl_->native().is_v4(); } bool gu::AsioIpAddress::is_v6() const { return impl_->native().is_v6(); } gu::AsioIpAddressV4 gu::AsioIpAddress::to_v4() const { gu::AsioIpAddressV4 ret; ret.impl().native() = impl_->native().to_v4(); return ret; } gu::AsioIpAddressV6 
gu::AsioIpAddress::to_v6() const { gu::AsioIpAddressV6 ret; ret.impl().native() = impl_->native().to_v6(); return ret; } // // Asio Error Code // gu::AsioErrorCategory gu_asio_system_category(asio::error::get_system_category()); gu::AsioErrorCategory gu_asio_misc_category(asio::error::get_misc_category()); #ifdef GALERA_HAVE_SSL gu::AsioErrorCategory gu_asio_ssl_category(asio::error::get_ssl_category()); #endif // GALERA_HAVE_SSL gu::AsioErrorCode::AsioErrorCode() : value_() , category_(&gu_asio_system_category) , error_extra_() { } gu::AsioErrorCode::AsioErrorCode(int err) : value_(err) , category_(&gu_asio_system_category) , error_extra_() { } std::string gu::AsioErrorCode::message() const { if (category_) { std::string ret( asio::error_code(value_, category_->native()).message()); #ifdef GALERA_HAVE_SSL if (*category_ == gu_asio_ssl_category && error_extra_) { ret += std::string(": ") + X509_verify_cert_error_string(error_extra_); } #endif // GALERA_HAVE_SSL return ret; } else { std::ostringstream oss; oss << ::strerror(value_); return oss.str(); } } std::ostream& gu::operator<<(std::ostream& os, const gu::AsioErrorCode& ec) { return (os << ec.message()); } gu::AsioErrorCode gu::AsioErrorCode::make_eof() { return {asio::error::misc_errors::eof, gu_asio_misc_category}; } bool gu::AsioErrorCode::is_eof() const { return (category_ && *category_ == gu_asio_misc_category && value_ == asio::error::misc_errors::eof); } bool gu::AsioErrorCode::is_system() const { return (not category_ || (category_ && *category_ == gu_asio_system_category)); } // // Utility methods // std::string gu::any_addr(const gu::AsioIpAddress& addr) { return ::any_addr(addr.impl().native()); } std::string gu::unescape_addr(const std::string& addr) { std::string ret(addr); size_t pos(ret.find('[')); if (pos != std::string::npos) ret.erase(pos, 1); pos = ret.find(']'); if (pos != std::string::npos) ret.erase(pos, 1); return ret; } gu::AsioIpAddress gu::make_address(const std::string& addr) { 
gu::AsioIpAddress ret; ret.impl().native() = ::make_address(addr); return ret; } // // SSL/TLS // // #ifdef GALERA_HAVE_SSL namespace { // Callback for reading SSL key protection password from file class SSLPasswordCallback { public: SSLPasswordCallback(const gu::Config& conf) : conf_(conf) { } std::string get_password() const { std::string file; try { file = conf_.get(gu::conf::ssl_password_file); } catch (const gu::NotSet&) { gu_throw_error(EINVAL) << gu::conf::ssl_password_file << " is required"; } std::ifstream ifs(file.c_str(), std::ios_base::in); if (ifs.good() == false) { gu_throw_system_error(errno) << "could not open password file '" << file << "'"; } std::string ret; std::getline(ifs, ret); return ret; } private: const gu::Config& conf_; }; } static void throw_last_SSL_error(const std::string& msg) { unsigned long const err(ERR_peek_last_error()); char errstr[120] = {0, }; ERR_error_string_n(err, errstr, sizeof(errstr)); gu_throw_error(EINVAL) << msg << ": " << err << ": '" << errstr << "'"; } // Exclude some errors which are generated by the SSL library. bool exclude_ssl_error(const asio::error_code& ec) { switch (ERR_GET_REASON(ec.value())) { // Short read errors seem to be generated quite frequently // by SSL library because of broken connections. For Galera // connections premature EOFs are not a problem because messages // are framed and the protocols are fault tolerant by design. // The error to suppress are: // SSL_R_SHORT_READ - OpenSSL < 3.0 // SSL_R_UNEXPECTED_EOF_WHILE_READING - OpenSSL >= 3.0 #ifdef SSL_R_SHORT_READ case SSL_R_SHORT_READ: return true; #endif /* SSL_R_SHORT_READ */ #ifdef SSL_R_UNEXPECTED_EOF_WHILE_READING case SSL_R_UNEXPECTED_EOF_WHILE_READING: // OpenSSL 3.0 and onwards. return true; #endif /* SSL_R_UNEXPECTED_EOF_WHILE_READING */ default: return false; } } // Return low level error info for asio::error_code if available. 
std::string extra_error_info(const asio::error_code& ec) { std::ostringstream os; if (ec.category() == asio::error::get_ssl_category()) { char errstr[120] = {0, }; ERR_error_string_n(ec.value(), errstr, sizeof(errstr)); os << ec.value() << ": '" << errstr << "'"; } return os.str(); } std::string gu::extra_error_info(const gu::AsioErrorCode& ec) { if (ec.category()) return ::extra_error_info(asio::error_code(ec.value(), ec.category()->native())); else return ""; } static SSL_CTX* native_ssl_ctx(asio::ssl::context& context) { #if ASIO_VERSION < 101401 return context.impl(); #else return context.native_handle(); #endif } static void ssl_prepare_context(const gu::Config& conf, asio::ssl::context& ctx, bool verify_peer_cert = true) { ctx.set_verify_mode(asio::ssl::context::verify_peer | (verify_peer_cert == true ? asio::ssl::context::verify_fail_if_no_peer_cert : 0)); SSLPasswordCallback cb(conf); ctx.set_password_callback( boost::bind(&SSLPasswordCallback::get_password, &cb)); std::string param; try { // In some older OpenSSL versions ECDH engines must be enabled // explicitly. Here we use SSL_CTX_set_ecdh_auto() or // SSL_CTX_set_tmp_ecdh() if present. 
#if defined(OPENSSL_HAS_SET_ECDH_AUTO) if (!SSL_CTX_set_ecdh_auto(native_ssl_ctx(ctx), 1)) { throw_last_SSL_error("SSL_CTX_set_ecdh_auto() failed"); } #elif defined(OPENSSL_HAS_SET_TMP_ECDH) { EC_KEY* const ecdh(EC_KEY_new_by_curve_name(NID_X9_62_prime256v1)); if (ecdh == NULL) { throw_last_SSL_error("EC_KEY_new_by_curve_name() failed"); } if (!SSL_CTX_set_tmp_ecdh(native_ssl_ctx(ctx),ecdh)) { throw_last_SSL_error("SSL_CTX_set_tmp_ecdh() failed"); } EC_KEY_free(ecdh); } #endif /* OPENSSL_HAS_SET_ECDH_AUTO | OPENSSL_HAS_SET_TMP_ECDH */ param = gu::conf::ssl_cert; ctx.use_certificate_chain_file(conf.get(param)); param = gu::conf::ssl_key; ctx.use_private_key_file(conf.get(param), asio::ssl::context::pem); param = gu::conf::ssl_ca; ctx.load_verify_file(conf.get(param, conf.get(gu::conf::ssl_cert))); param = gu::conf::ssl_cipher; std::string const value(conf.get(param)); if (!value.empty()) { if (!SSL_CTX_set_cipher_list(native_ssl_ctx(ctx), value.c_str())) { throw_last_SSL_error("Error setting SSL cipher list to '" + value + "'"); } else { log_info << "SSL cipher list set to '" << value << '\''; } } ctx.set_options(asio::ssl::context::no_sslv2 | asio::ssl::context::no_sslv3 | asio::ssl::context::no_tlsv1); } catch (asio::system_error& ec) { gu_throw_error(EINVAL) << "Bad value '" << conf.get(param, "") << "' for SSL parameter '" << param << "': " << ::extra_error_info(ec.code()); } catch (gu::NotSet& ec) { gu_throw_error(EINVAL) << "Missing required value for SSL parameter '" << param << "'"; } } /* checks if all mandatory SSL options are set */ static bool ssl_check_conf(const gu::Config& conf) { using namespace gu; bool explicit_ssl(false); if (conf.is_set(conf::use_ssl)) { if (conf.get(conf::use_ssl) == false) { return false; // SSL is explicitly disabled } else { explicit_ssl = true; } } int count(0); count += conf.is_set(conf::ssl_key); count += conf.is_set(conf::ssl_cert); bool const use_ssl(explicit_ssl || count > 0); if (use_ssl && count < 2) { 
gu_throw_error(EINVAL) << "To enable SSL at least both of '" << conf::ssl_key << "' and '" << conf::ssl_cert << "' must be set"; } return use_ssl; } static void init_use_ssl(gu::Config& conf) { // use ssl if either private key or cert file is specified bool use_ssl(conf.is_set(gu::conf::ssl_key) == true || conf.is_set(gu::conf::ssl_cert) == true); try { // overrides use_ssl if set explicitly use_ssl = conf.get(gu::conf::use_ssl); } catch (gu::NotSet& nf) {} if (use_ssl == true) { conf.set(gu::conf::use_ssl, true); } } void gu::ssl_register_params(gu::Config& conf) { // register SSL config parameters conf.add(gu::conf::use_ssl, gu::Config::Flag::read_only | gu::Config::Flag::type_bool); conf.add(gu::conf::ssl_cipher, gu::Config::Flag::read_only); conf.add(gu::conf::ssl_compression, gu::Config::Flag::read_only | gu::Config::Flag::type_bool | gu::Config::Flag::deprecated); conf.add(gu::conf::ssl_key, gu::Config::Flag::read_only); conf.add(gu::conf::ssl_cert, gu::Config::Flag::read_only); conf.add(gu::conf::ssl_ca, gu::Config::Flag::read_only); conf.add(gu::conf::ssl_password_file, gu::Config::Flag::read_only); conf.add(gu::conf::ssl_reload, gu::Config::Flag::type_bool); conf.add(gu::conf::socket_dynamic, gu::Config::Flag::read_only | gu::Config::Flag::type_bool); } void gu::ssl_param_set(const std::string& key, const std::string& val, gu::Config& conf) { if (key == gu::conf::ssl_reload) { if (conf.has(conf::use_ssl) && conf.get(conf::use_ssl, false)) { try { #if ASIO_VERSION < 101401 asio::io_service io_service; asio::ssl::context ctx(io_service, asio::ssl::context::sslv23); #else asio::ssl::context ctx(asio::ssl::context::sslv23); #endif ssl_prepare_context(conf, ctx); // Send signal gu::Signals::Instance().signal(gu::Signals::S_CONFIG_RELOAD_CERTIFICATE); } catch (asio::system_error& ec) { gu_throw_error(EINVAL) << "Initializing SSL context failed: " << ::extra_error_info(ec.code()); } } } else { throw gu::NotFound(); } } void gu::ssl_init_options(gu::Config& conf) 
{ init_use_ssl(conf); bool use_ssl(ssl_check_conf(conf)); if (use_ssl == true) { // set defaults conf.set(conf::ssl_reload, 1); // cipher list const std::string cipher_list(conf.get(conf::ssl_cipher, "")); conf.set(conf::ssl_cipher, cipher_list); // compression try { (void) conf.get(conf::ssl_compression); // warn the user if socket.ssl_compression is set explicitly log_warn << "SSL compression is not effective. The option " << conf::ssl_compression << " is deprecated and " << "will be removed in future releases."; } catch (NotSet&) { // this is a desirable situation } log_info << "not using SSL compression"; sk_SSL_COMP_zero(SSL_COMP_get_compression_methods()); // verify that asio::ssl::context can be initialized with provided // values try { #if ASIO_VERSION < 101401 asio::io_service io_service; asio::ssl::context ctx(io_service, asio::ssl::context::sslv23); #else asio::ssl::context ctx(asio::ssl::context::sslv23); #endif ssl_prepare_context(conf, ctx); } catch (asio::system_error& ec) { gu_throw_error(EINVAL) << "Initializing SSL context failed: " << ::extra_error_info(ec.code()); } } } #endif // GALERA_HAVE_SSL bool gu::is_verbose_error(const gu::AsioErrorCode& ec) { // Suppress system error which occur frequently during configuration // changes and are not likely caused by programming errors. if (ec.is_system()) { switch (ec.value()) { case ECANCELED: // Socket close case EPIPE: // Writing while remote end closed connection case ECONNRESET: // Remote end closed connection case EBADF: // Socket closed before completion/read handler exec return true; default: return false; } } // EOF errors happen all the time when cluster configuration changes. if (ec.is_eof()) return true; #ifdef GALERA_HAVE_SSL // Suppress certain SSL errors. 
return (not ec.category() || *ec.category() != gu_asio_ssl_category || exclude_ssl_error(asio::error_code( ec.value(), ec.category()->native()))); #else return false; #endif // GALERA_HAVE_SSL } // // IO Service wrapper // gu::AsioIoService::AsioIoService(const gu::Config& conf) : impl_(std::unique_ptr(new Impl)) , conf_(conf) , signal_connection_() , dynamic_socket_(false) { signal_connection_ = gu::Signals::Instance().connect( gu::Signals::slot_type(&gu::AsioIoService::handle_signal, this, _1)); if (conf.has(gu::conf::socket_dynamic)) { dynamic_socket_ = conf.get(gu::conf::socket_dynamic, false); } #ifdef GALERA_HAVE_SSL load_crypto_context(); #endif // GALERA_HAVE_SSL } gu::AsioIoService::~AsioIoService() { signal_connection_.disconnect(); }; void gu::AsioIoService::handle_signal(const gu::Signals::SignalType& type) { switch(type) { case gu::Signals::SignalType::S_CONFIG_RELOAD_CERTIFICATE: #ifdef GALERA_HAVE_SSL load_crypto_context(); #endif // GALERA_HAVE_SSL break; default: break; } } bool gu::AsioIoService::ssl_enabled() const { #ifdef GALERA_HAVE_SSL return impl_->ssl_context_.get(); #else // GALERA_HAVE_SSL return false; #endif } void gu::AsioIoService::load_crypto_context() { #ifdef GALERA_HAVE_SSL if (conf_.has(conf::use_ssl) && conf_.get(conf::use_ssl, false)) { if (not impl_->ssl_context_) { impl_->ssl_context_ = std::unique_ptr( new asio::ssl::context(asio::ssl::context::sslv23)); } ssl_prepare_context(conf_, *impl_->ssl_context_); } #endif // GALERA_HAVE_SSL } void gu::AsioIoService::run_one() { impl_->native().run_one(); } void gu::AsioIoService::poll_one() { impl_->native().poll_one(); } size_t gu::AsioIoService::run() { return impl_->native().run(); } void gu::AsioIoService::post(std::function fun) { impl_->native().post(fun); } void gu::AsioIoService::stop() { impl_->native().stop(); } void gu::AsioIoService::reset() { impl_->native().reset(); } gu::AsioIoService::Impl& gu::AsioIoService::impl() { return *impl_; } std::shared_ptr 
gu::AsioIoService::make_socket( const gu::URI& uri, const std::shared_ptr& engine) { return std::make_shared(*this, uri.get_scheme(), engine); } std::shared_ptr gu::AsioIoService::make_datagram_socket( const gu::URI& uri) { if (uri.get_scheme() == gu::scheme::udp) return std::make_shared(*this); gu_throw_error(EINVAL) << "Datagram socket scheme " << uri.get_scheme() << " not supported"; return std::shared_ptr(); } std::shared_ptr gu::AsioIoService::make_acceptor( const gu::URI& uri) { return std::make_shared(*this, uri.get_scheme()); } // // Steady timer // class gu::AsioSteadyTimer::Impl { public: #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) typedef asio::deadline_timer native_timer_type; #else typedef asio::steady_timer native_timer_type; #endif /* #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) */ Impl(asio::io_service& io_service) : timer_(io_service) { } native_timer_type& native() { return timer_; } void handle_wait(const std::shared_ptr& handler, const asio::error_code& ec) { handler->handle_wait(AsioErrorCode(ec.value(), ec.category())); } private: native_timer_type timer_; }; #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) static inline boost::posix_time::time_duration to_native_duration(const gu::AsioClock::duration& duration) { return boost::posix_time::nanosec( std::chrono::duration_cast(duration).count()); } #else static inline std::chrono::steady_clock::duration to_native_duration(const gu::AsioClock::duration& duration) { return duration; } #endif gu::AsioSteadyTimer::AsioSteadyTimer( AsioIoService& io_service) : impl_(new Impl(io_service.impl().native())) { } gu::AsioSteadyTimer::~AsioSteadyTimer() { } void gu::AsioSteadyTimer::expires_from_now( const AsioClock::duration& duration) { impl_->native().expires_from_now(to_native_duration(duration)); } void gu::AsioSteadyTimer::async_wait( const std::shared_ptr& handler) { impl_->native().async_wait(boost::bind(&Impl::handle_wait, impl_.get(), handler, asio::placeholders::error)); } void 
gu::AsioSteadyTimer::cancel() { impl_->native().cancel(); } // // Allowlist // bool gu::allowlist_value_check(wsrep_allowlist_key_t key, const std::string& value) { if (gu_allowlist_service == nullptr) { return true; } wsrep_buf_t const check_value = { value.c_str(), value.length() }; wsrep_status_t result(gu_allowlist_service->allowlist_cb( gu_allowlist_service->context, key, &check_value)); switch (result) { case WSREP_OK: return true; case WSREP_NOT_ALLOWED: return false; default: gu_throw_error(EINVAL) << "Unknown allowlist callback response: " << result << ", aborting."; } } static std::mutex gu_allowlist_service_init_mutex; static size_t gu_allowlist_service_usage; int gu::init_allowlist_service_v1(wsrep_allowlist_service_v1_t* allowlist_service) { std::lock_guard lock(gu_allowlist_service_init_mutex); ++gu_allowlist_service_usage; if (gu_allowlist_service) { assert(gu_allowlist_service == allowlist_service); return 0; } gu_allowlist_service = allowlist_service; return 0; } void gu::deinit_allowlist_service_v1() { std::lock_guard lock(gu_allowlist_service_init_mutex); assert(gu_allowlist_service_usage > 0); --gu_allowlist_service_usage; if (gu_allowlist_service_usage == 0) gu_allowlist_service = 0; } std::atomic gu::gu_asio_node_isolation_mode{ WSREP_NODE_ISOLATION_NOT_ISOLATED }; galera-4-26.4.25/galerautils/src/gu_init.h000644 000164 177776 00000000632 15107057155 021427 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013 Codership Oy * * $Id$ */ /*! @file Common initializer for various galerautils parts. Currently it is * logger and CRC32C implementation. 
*/ #ifndef _GU_INIT_H_ #define _GU_INIT_H_ #if defined(__cplusplus) extern "C" { #endif #include "gu_log.h" extern void gu_init (gu_log_cb_t log_cb); #if defined(__cplusplus) } #endif #endif /* _GU_INIT_H_ */ galera-4-26.4.25/galerautils/src/gu_buf.hpp000644 000164 177776 00000000376 15107057155 021605 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy */ /** * @file generic buffer declaration * * $Id$ */ #ifndef _GU_BUF_HPP_ #define _GU_BUF_HPP_ #include "gu_buf.h" namespace gu { typedef struct gu_buf Buf; } #endif /* _GU_BUF_HPP_ */ galera-4-26.4.25/galerautils/src/gu_log.c000644 000164 177776 00000007706 15107057155 021251 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2014 Codership Oy /** * @file Logging functions definitions * * $Id$ */ #include #include #include #include #include #include #include #include "gu_log.h" #include "gu_macros.h" /* Global configurable variables */ static FILE* gu_log_file = NULL; bool gu_log_self_tstamp = false; gu_log_severity_t gu_log_max_level = GU_LOG_INFO; int gu_conf_set_log_file (FILE *file) { gu_debug ("Log file changed by application"); if (file) { gu_log_file = file; } else { gu_log_file = stderr; } return 0; } int gu_conf_self_tstamp_on () { gu_debug ("Turning self timestamping on"); gu_log_self_tstamp = true; return 0; } int gu_conf_self_tstamp_off () { gu_debug ("Turning self timestamping off"); gu_log_self_tstamp = false; return 0; } int gu_conf_debug_on () { gu_log_max_level = GU_LOG_DEBUG; gu_debug ("Turning debug logging on"); return 0; } int gu_conf_debug_off () { gu_debug ("Turning debug logging off"); gu_log_max_level = GU_LOG_INFO; return 0; } /** Returns current timestamp in the provided buffer */ static inline int log_tstamp (char* tstamp, size_t const len) { int ret = 0; struct tm date; struct timeval time; gettimeofday (&time, NULL); localtime_r (&time.tv_sec, &date); /* 23 symbols */ ret = snprintf (tstamp, len, "%04d-%02d-%02d %02d:%02d:%02d.%03d ", date.tm_year + 1900, 
date.tm_mon + 1, date.tm_mday, date.tm_hour, date.tm_min, date.tm_sec, (int)time.tv_usec / 1000); return ret; } const char* gu_log_level_str[GU_LOG_DEBUG + 2] = { "FATAL: ", "ERROR: ", " WARN: ", " INFO: ", "DEBUG: ", "XXXXX: " }; /** * @function * Default logging function: simply writes to stderr or gu_log_file if set. */ void gu_log_cb_default (int severity, const char* msg) { FILE* log_file = gu_log_file ? gu_log_file : stderr; fputs (msg, log_file); fputc ('\n', log_file); fflush (log_file); } /** * Log function handle. * Can be changed by application through gu_conf_set_log_callback() */ gu_log_cb_t gu_log_cb = gu_log_cb_default; int gu_conf_set_log_callback (gu_log_cb_t callback) { if (callback) { gu_debug ("Logging function changed by application"); gu_log_cb = callback; } else { gu_debug ("Logging function restored to default"); gu_log_cb = gu_log_cb_default; } return 0; } int gu_log (gu_log_severity_t severity, const char* file, const char* function, const int line, const char* fmt, ...) { va_list ap; int max_string = 2048; char string[max_string]; /** @note: this can cause stack overflow * in kernel mode (both Linux and Windows). */ char* str = string; int len; if (gu_log_self_tstamp) { len = log_tstamp (str, max_string); str += len; max_string -= len; } if (gu_likely(max_string > 0)) { const char* log_level_str = gu_log_cb_default == gu_log_cb ? 
gu_log_level_str[severity] : ""; /* provide file:func():line info only if debug logging is on */ if (gu_likely(!gu_log_debug && severity > GU_LOG_ERROR)) { len = snprintf (str, max_string, "%s", log_level_str); } else { len = snprintf (str, max_string, "%s%s:%s():%d: ", log_level_str, file, function, line); } str += len; max_string -= len; va_start (ap, fmt); { if (gu_likely(max_string > 0 && NULL != fmt)) { vsnprintf (str, max_string, fmt, ap); } } va_end (ap); } /* actual logging */ gu_log_cb (severity, string); return 0; } galera-4-26.4.25/galerautils/src/gu_string_utils.hpp000644 000164 177776 00000001766 15107057155 023563 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2010 Codership Oy #ifndef __GU_STRING_UTILS_HPP__ #define __GU_STRING_UTILS_HPP__ #include #include namespace gu { /*! * @brief Split string into tokens using given separator * * @param sep token separator */ std::vector strsplit(const std::string& s, char sep = ' '); /*! * @brief Split string into tokens using given separator and escape. * * @param sep token separator * @param esc separator escape sequence ('\0' to disable escapes) * @param empty whether to return empty tokens */ std::vector tokenize(const std::string& s, char sep = ' ', char esc = '\\', bool empty = false); /*! Remove non-alnum symbols from the beginning and end of string */ void trim (std::string& s); } #endif /* __GU_STRING_UTILS_HPP__ */ galera-4-26.4.25/galerautils/src/gu_dbug.c000644 000164 177776 00000153173 15107057155 021411 0ustar00jenkinsnogroup000000 000000 /****************************************************************************** * * * N O T I C E * * * * Copyright Abandoned, 1987, Fred Fish * * * * * * This previously copyrighted work has been placed into the public * * domain by the author and may be freely used for any purpose, * * private or commercial. 
* * * * Because of the number of inquiries I was receiving about the use * * of this product in commercially developed works I have decided to * * simply make it public domain to further its unrestricted use. I * * specifically would be most happy to see this material become a * * part of the standard Unix distributions by AT&T and the Berkeley * * Computer Science Research Group, and a standard part of the GNU * * system from the Free Software Foundation. * * * * I would appreciate it, as a courtesy, if this notice is left in * * all copies and derivative works. Thank you. * * * * The author makes no warranty of any kind with respect to this * * product and explicitly disclaims any implied warranties of mer- * * chantability or fitness for any particular purpose. * * * ****************************************************************************** */ /* * FILE * * dbug.c runtime support routines for dbug package * * SCCS * * @(#)dbug.c 1.25 7/25/89 * * DESCRIPTION * * These are the runtime support routines for the dbug package. * The dbug package has two main components; the user include * file containing various macro definitions, and the runtime * support routines which are called from the macro expansions. * * Externally visible functions in the runtime support module * use the naming convention pattern "_db_xx...xx_", thus * they are unlikely to collide with user defined function names. * * AUTHOR(S) * * Fred Fish (base code) * Enhanced Software Technologies, Tempe, AZ * asuvax!mcdphx!estinc!fnf * * Binayak Banerjee (profiling enhancements) * seismo!bpa!sjuvax!bbanerje * * Michael Widenius: * DBUG_DUMP - To dump a pice of memory. * PUSH_FLAG "O" - To be used insted of "o" if we don't * want flushing (for slow systems) * PUSH_FLAG "A" - as 'O', but we will append to the out file instead * of creating a new one. 
* Check of malloc on entry/exit (option "S") * * Alexey Yurchenko: * - Renamed global symbols for use with galera project to avoid * collisions with other software (notably MySQL) * * Teemu Ollakka: * - Slight cleanups, removed some MySQL dependencies. * - All global variables should now have _gu_db prefix. * - Thread -> state mapping for multithreaded programs. * - Changed initialization so that it is done on the first * call to _gu_db_push(). * * Jan Lindström * - Silence coverity resource leak issue. * * $Id$ */ #include #include #include #include #include #include #ifndef GU_DBUG_ON #define GU_DBUG_ON #endif #include "gu_dbug.h" /* Make a new type: bool_t */ typedef enum { FALSE = (0 != 0), TRUE = (!FALSE) } bool_t; #define _VARARGS(X) X #define FN_LIBCHAR 1024 #define FN_REFLEN 1024 #define NullS "" #include #if defined(MSDOS) || defined(__WIN__) #include #endif #ifdef _GU_DBUG_CONDITION_ #define _GU_DBUG_START_CONDITION_ "d:t" #else #define _GU_DBUG_START_CONDITION_ "" #endif /* * Manifest constants that should not require any changes. */ #define EOS '\000' /* End Of String marker */ /* * Manifest constants which may be "tuned" if desired. */ #define PRINTBUF 1024 /* Print buffer size */ #define INDENT 2 /* Indentation per trace level */ #define MAXDEPTH 200 /* Maximum trace depth default */ /* * The following flags are used to determine which * capabilities the user has enabled with the state * push macro. 
*/ #define TRACE_ON 000001 /* Trace enabled */ #define DEBUG_ON 000002 /* Debug enabled */ #define FILE_ON 000004 /* File name print enabled */ #define LINE_ON 000010 /* Line number print enabled */ #define DEPTH_ON 000020 /* Function nest level print enabled */ #define PROCESS_ON 000040 /* Process name print enabled */ #define NUMBER_ON 000100 /* Number each line of output */ #define PROFILE_ON 000200 /* Print out profiling code */ #define PID_ON 000400 /* Identify each line with process id */ #define SANITY_CHECK_ON 001000 /* Check my_malloc on GU_DBUG_ENTER */ #define FLUSH_ON_WRITE 002000 /* Flush on every write */ #define TRACING (_gu_db_stack -> flags & TRACE_ON) #define DEBUGGING (_gu_db_stack -> flags & DEBUG_ON) #define PROFILING (_gu_db_stack -> flags & PROFILE_ON) #define STREQ(a,b) (strcmp(a,b) == 0) #define min(a,b) ((a) < (b) ? (a) : (b)) #define max(a,b) ((a) > (b) ? (a) : (b)) /* * Typedefs to make things more obvious.??? */ #ifndef __WIN__ typedef int BOOLEAN; #else #define BOOLEAN BOOL #endif /* * Make it easy to change storage classes if necessary. */ #define IMPORT extern /* Names defined externally */ #define EXPORT /* Allocated here, available globally */ #define AUTO auto /* Names to be allocated on stack */ #define REGISTER register /* Names to be placed in registers */ /* * The default file for profiling. Could also add another flag * (G?) which allowed the user to specify this. * * If the automatic variables get allocated on the stack in * reverse order from their declarations, then define AUTOS_REVERSE. * This is used by the code that keeps track of stack usage. For * forward allocation, the difference in the dbug frame pointers * represents stack used by the callee function. For reverse allocation, * the difference represents stack used by the caller function. 
* */ #define PROF_FILE "dbugmon.out" #define PROF_EFMT "E\t%ld\t%s\n" #define PROF_SFMT "S\t%lx\t%lx\t%s\n" #define PROF_XFMT "X\t%ld\t%s\n" #ifdef M_I386 /* predefined by xenix 386 compiler */ #define AUTOS_REVERSE 1 #endif /* * Variables which are available externally but should only * be accessed via the macro package facilities. */ FILE *_gu_db_fp_ = (FILE*) 0; /* Output stream, default stderr */ char *_gu_db_process_ = (char*) "dbug"; /* Pointer to process name; argv[0] */ FILE *_gu_db_pfp_ = (FILE*) 0; /* Profile stream, 'dbugmon.out' */ BOOLEAN _gu_db_on_ = FALSE; /* TRUE if debugging currently on */ BOOLEAN _gu_db_pon_ = FALSE; /* TRUE if profile currently on */ BOOLEAN _gu_no_db_ = TRUE; /* TRUE if no debugging at all */ /* * Externally supplied functions. */ /* * Galera does not provided _sanity which is used when SAFEMALLOC is * defined */ #undef SAFEMALLOC IMPORT int _sanity(const char *file, uint line); /* * The user may specify a list of functions to trace or * debug. These lists are kept in a linear linked list, * a very simple implementation. */ struct link { char *str; /* Pointer to link's contents */ struct link *next_link; /* Pointer to the next link */ }; /* * Debugging states can be pushed or popped off of a * stack which is implemented as a linked list. Note * that the head of the list is the current state and the * stack is pushed by adding a new state to the head of the * list or popped by removing the first link. 
*/ struct state { int flags; /* Current state flags */ int maxdepth; /* Current maximum trace depth */ uint delay; /* Delay after each output line */ int sub_level; /* Sub this from code_state->level */ FILE* out_file; /* Current output stream */ FILE* prof_file; /* Current profiling stream */ char name[FN_REFLEN]; /* Name of output file */ struct link* functions; /* List of functions */ struct link* p_functions; /* List of profiled functions */ struct link* keywords; /* List of debug keywords */ struct link* processes; /* List of process names */ struct state* next_state; /* Next state in the list */ }; /* * Local variables not seen by user. */ static struct state* _gu_db_stack = 0; typedef struct st_code_state { int lineno; /* Current debugger output line number */ int level; /* Current function nesting level */ const char* func; /* Name of current user function */ const char* file; /* Name of current user file */ char** framep; /* Pointer to current frame */ int jmplevel; /* Remember nesting level at setjmp () */ const char* jmpfunc; /* Remember current function for setjmp */ const char* jmpfile; /* Remember current file for setjmp */ /* * The following variables are used to hold the state information * between the call to _gu_db_pargs_() and _gu_db_doprnt_(), during * expansion of the GU_DBUG_PRINT macro. This is the only macro * that currently uses these variables. * * These variables are currently used only by _gu_db_pargs_() and * _gu_db_doprnt_(). 
*/ uint u_line; /* User source code line number */ const char* u_keyword; /* Keyword for current macro */ int locked; /* If locked with _gu_db_lock_file */ } CODE_STATE; /* Parse a debug command string */ static struct link *ListParse(char *ctlp); /* Make a fresh copy of a string */ static char *StrDup(const char *str); /* Open debug output stream */ static void GU_DBUGOpenFile(const char *name, int append); #ifndef THREAD /* Open profile output stream */ static FILE *OpenProfile(const char *name); /* Profile if asked for it */ static BOOLEAN DoProfile(void); /* Return current user time (ms) */ static unsigned long Clock(void); #endif /* Close debug output stream */ static void CloseFile(FILE * fp); /* Push current debug state */ static void PushState(void); /* Test for tracing enabled */ static BOOLEAN DoTrace(CODE_STATE * state); /* Test to see if file is writable */ #if !(!defined(HAVE_ACCESS) || defined(MSDOS)) static BOOLEAN Writable(char *pathname); /* Change file owner and group */ static void ChangeOwner(char *pathname); /* Allocate memory for runtime support */ #endif static char *DbugMalloc(int size); /* Remove leading pathname components */ static char *BaseName(const char *pathname); static void DoPrefix(uint line); static void FreeList(struct link *linkp); static void Indent(int indent); static BOOLEAN InList(struct link *linkp, const char *cp); static void dbug_flush(CODE_STATE *); static void DbugExit(const char *why); static int DelayArg(int value); /* Supplied in Sys V runtime environ */ /* Break string into tokens */ static char *static_strtok(char *s1, char chr); /* * Miscellaneous printf format strings. 
*/ #define ERR_MISSING_RETURN "%s: missing GU_DBUG_RETURN or GU_DBUG_VOID_RETURN macro in function \"%s\"\n" #define ERR_OPEN "%s: can't open debug output stream \"%s\": " #define ERR_CLOSE "%s: can't close debug file: " #define ERR_ABORT "%s: debugger aborting because %s\n" #define ERR_CHOWN "%s: can't change owner/group of \"%s\": " /* * Macros and defines for testing file accessibility under UNIX and MSDOS. */ #undef EXISTS #if !defined(HAVE_ACCESS) || defined(MSDOS) #define EXISTS(pathname) (FALSE) /* Assume no existence */ #define Writable(name) (TRUE) #else #define EXISTS(pathname) (access (pathname, F_OK) == 0) #define WRITABLE(pathname) (access (pathname, W_OK) == 0) #endif #ifndef MSDOS #define ChangeOwner(name) #endif /* * Translate some calls among different systems. */ #if defined(unix) || defined(xenix) || defined(VMS) || defined(__NetBSD__) # define Delay(A) sleep((uint) A) #elif defined(AMIGA) IMPORT int Delay(); /* Pause for given number of ticks */ #else static int Delay(int ticks); #endif /* ** Macros to allow dbugging with threads */ #ifdef THREAD #include pthread_once_t _gu_db_once = PTHREAD_ONCE_INIT; pthread_mutex_t _gu_db_mutex = PTHREAD_MUTEX_INITIALIZER; struct state_map { pthread_t th; CODE_STATE *state; struct state_map *prev; struct state_map *next; }; #define _GU_DB_STATE_MAP_BUCKETS (1 << 7) static struct state_map *_gu_db_state_map[_GU_DB_STATE_MAP_BUCKETS]; /* * This hash is probably good enough. Golden ratio 2654435761U from * http://www.concentric.net/~Ttwang/tech/inthash.htm * * UPDATE: it is good enough for input with significant variation in * 32 lower bits. */ static inline unsigned long pt_hash(const pthread_t th) { unsigned long k = (unsigned long)th; uint64_t ret = 2654435761U * k; // since we're returning a masked hash key, all considerations // for "reversibility" can be dropped. Instead we can help // higher input bits influence lower output bits. XOR rules. 
return (ret ^ (ret >> 32)) & (_GU_DB_STATE_MAP_BUCKETS - 1); } static CODE_STATE *state_map_find(const pthread_t th) { unsigned int key = pt_hash(th); struct state_map *sm = _gu_db_state_map[key]; while (sm && sm->th != th) sm = sm->next; return sm ? sm->state : NULL; } void state_map_insert(const pthread_t th, CODE_STATE *state) { unsigned int key; struct state_map *sm; assert(state_map_find(th) == NULL); key = pt_hash(th); sm = malloc(sizeof(struct state_map)); sm->state = state; sm->th = th; pthread_mutex_lock(&_gu_db_mutex); sm->prev = NULL; sm->next = _gu_db_state_map[key]; if (sm->next) sm->next->prev = sm; _gu_db_state_map[key] = sm; pthread_mutex_unlock(&_gu_db_mutex); } void state_map_erase(const pthread_t th) { unsigned int key; struct state_map *sm; key = pt_hash(th); sm = _gu_db_state_map[key]; while (sm && sm->th != th) sm = sm->next; assert(sm); pthread_mutex_lock(&_gu_db_mutex); if (sm->prev) { sm->prev->next = sm->next; } else { assert(_gu_db_state_map[key] == sm); _gu_db_state_map[key] = sm->next; } if (sm->next) sm->next->prev = sm->prev; pthread_mutex_unlock(&_gu_db_mutex); free(sm); } static CODE_STATE * code_state(void) { CODE_STATE *state = 0; if ((state = state_map_find(pthread_self())) == NULL) { state = malloc(sizeof(CODE_STATE)); memset(state, 0, sizeof(CODE_STATE)); state->func = "?func"; state->file = "?file"; state->u_keyword = "?"; state_map_insert(pthread_self(), state); } return state; } static void code_state_cleanup(CODE_STATE *state) { if (state->level == 0) { state_map_erase(pthread_self()); free(state); } } static void _gu_db_init() { if (!_gu_db_fp_) _gu_db_fp_ = stderr; /* Output stream, default stderr */ memset(_gu_db_state_map, 0, sizeof(_gu_db_state_map)); } #else /* !THREAD */ #define _gu_db_init() #define code_state() (&static_code_state) #define code_state_cleanup(A) do {} while (0) #define pthread_mutex_lock(A) {} #define pthread_mutex_unlock(A) {} static CODE_STATE static_code_state = { 0, 0, "?func", "?file", NULL, 0, 
NULL, NULL, 0, "?", 0 }; #endif /* * FUNCTION * * _gu_db_push_ push current debugger state and set up new one * * SYNOPSIS * * VOID _gu_db_push_ (control) * char *control; * * DESCRIPTION * * Given pointer to a debug control string in "control", pushes * the current debug state, parses the control string, and sets * up a new debug state. * * The only attribute of the new state inherited from the previous * state is the current function nesting level. This can be * overridden by using the "r" flag in the control string. * * The debug control string is a sequence of colon separated fields * as follows: * * ::...: * * Each field consists of a mandatory flag character followed by * an optional "," and comma separated list of modifiers: * * flag[,modifier,modifier,...,modifier] * * The currently recognized flag characters are: * * d Enable output from GU_DBUG_ macros for * for the current state. May be followed * by a list of keywords which selects output * only for the GU_DBUG macros with that keyword. * A null list of keywords implies output for * all macros. * * D Delay after each debugger output line. * The argument is the number of tenths of seconds * to delay, subject to machine capabilities. * I.E. -#D,20 is delay two seconds. * * f Limit debugging and/or tracing, and profiling to the * list of named functions. Note that a null list will * disable all functions. The appropriate "d" or "t" * flags must still be given, this flag only limits their * actions if they are enabled. * * F Identify the source file name for each * line of debug or trace output. * * i Identify the process with the pid for each line of * debug or trace output. * * g Enable profiling. Create a file called 'dbugmon.out' * containing information that can be used to profile * the program. May be followed by a list of keywords * that select profiling only for the functions in that * list. A null list implies that all functions are * considered. 
* * L Identify the source file line number for * each line of debug or trace output. * * n Print the current function nesting depth for * each line of debug or trace output. * * N Number each line of dbug output. * * o Redirect the debugger output stream to the * specified file. The default output is stderr. * * O As O but the file is really flushed between each * write. When neaded the file is closed and reopened * between each write. * * p Limit debugger actions to specified processes. * A process must be identified with the * GU_DBUG_PROCESS macro and match one in the list * for debugger actions to occur. * * P Print the current process name for each * line of debug or trace output. * * r When pushing a new state, do not inherit * the previous state's function nesting level. * Useful when the output is to start at the * left margin. * * S Do function _sanity(_file_,_line_) at each * debugged function until _sanity() returns * something that differs from 0. * (Moustly used with my_malloc) * * t Enable function call/exit trace lines. * May be followed by a list (containing only * one modifier) giving a numeric maximum * trace level, beyond which no output will * occur for either debugging or tracing * macros. The default is a compile time * option. * * Some examples of debug control strings which might appear * on a shell command line (the "-#" is typically used to * introduce a control string to an application program) are: * * -#d:t * -#d:f,main,subr1:F:L:t,20 * -#d,input,output,files:n * * For convenience, any leading "-#" is stripped off. 
 *
 */

/* Parse a debug control string (see the grammar documented above) and
 * install it as the new active debug state on top of the state stack. */
void _gu_db_push_(const char *control)
{
    register char *scan;
    register struct link *temp;
    CODE_STATE *state;
    char *new_str;

    /* lazy one-time init of output stream and thread state map */
    pthread_once(&_gu_db_once, &_gu_db_init);

    /* strip an optional leading "-#" prefix */
    if (control && *control == '-') {
        if (*++control == '#')
            control++;
    }
    /* NOTE(review): a NULL 'control' passes the guard above but crashes
     * on the dereference below -- presumably callers never pass NULL;
     * confirm. */
    if (*control)
        _gu_no_db_ = FALSE;     /* We are using dbug after all */
    else
        return;

    new_str = StrDup(control);
    PushState();
    state = code_state();

    /* fields are ':'-separated; each is flag[,modifier,...] */
    scan = static_strtok(new_str, ':');
    for (; scan != NULL; scan = static_strtok((char *) NULL, ':')) {
        switch (*scan++) {
        case 'd':               /* enable debug output, optional keyword list */
            _gu_db_on_ = TRUE;
            _gu_db_stack->flags |= DEBUG_ON;
            if (*scan++ == ',') {
                temp = ListParse(scan);
                if (_gu_db_stack->keywords) {
                    temp->next_link= _gu_db_stack->keywords;
                    _gu_db_stack->keywords = temp;
                }
                else
                    _gu_db_stack->keywords = temp;
            }
            break;
        case 'D':               /* per-line delay, in tenths of a second */
            _gu_db_stack->delay = 0;
            if (*scan++ == ',') {
                temp = ListParse(scan);
                _gu_db_stack->delay = DelayArg(atoi(temp->str));
                FreeList(temp);
            }
            break;
        case 'f':               /* restrict to listed functions */
            if (*scan++ == ',') {
                temp = ListParse(scan);
                if (_gu_db_stack->functions) {
                    temp->next_link= _gu_db_stack->functions;
                    _gu_db_stack->functions = temp;
                }
                else
                    _gu_db_stack->functions = temp;
            }
            break;
        case 'F':               /* show source file name */
            _gu_db_stack->flags |= FILE_ON;
            break;
        case 'i':               /* show pid (and thread id) */
            _gu_db_stack->flags |= PID_ON;
            break;
#ifndef THREAD
        case 'g':               /* enable profiling */
            _gu_db_pon_ = TRUE;
            if (OpenProfile(PROF_FILE)) {
                _gu_db_stack->flags |= PROFILE_ON;
                if (*scan++ == ',') {
                    temp = ListParse(scan);
                    if (_gu_db_stack->p_functions) {
                        temp->next_link= _gu_db_stack->p_functions;
                        _gu_db_stack->p_functions = temp;
                    }
                    else
                        _gu_db_stack->p_functions = temp;
                }
                break;
                /* NOTE(review): the break above is INSIDE the
                 * if(OpenProfile(...)) block, so when OpenProfile fails
                 * control falls through to case 'L' and sets LINE_ON.
                 * Looks unintentional (inherited from upstream dbug) --
                 * confirm before changing. */
            }
#endif
        case 'L':               /* show source line number */
            _gu_db_stack->flags |= LINE_ON;
            break;
        case 'n':               /* show function nesting depth */
            _gu_db_stack->flags |= DEPTH_ON;
            break;
        case 'N':               /* number output lines */
            _gu_db_stack->flags |= NUMBER_ON;
            break;
        case 'A':
        case 'O':               /* like a/o but flush after every write */
            _gu_db_stack->flags |= FLUSH_ON_WRITE;
            // fall through
        case 'a':
        case 'o':               /* redirect output to file ("-" = stdout) */
            if (*scan++ == ',') {
                temp = ListParse(scan);
                /* scan[-2] is the flag char: 'A'/'a' means append */
                GU_DBUGOpenFile(temp->str,
                                (int) (scan[-2] == 'A' || scan[-2] == 'a'));
                FreeList(temp);
            }
            else {
                GU_DBUGOpenFile("-", 0);
            }
            break;
        case 'p':               /* restrict to listed processes */
            if (*scan++ == ',') {
                temp = ListParse(scan);
                if (_gu_db_stack->processes) {
                    temp->next_link= _gu_db_stack->processes;
                    _gu_db_stack->processes = temp;
                }
                else
                    _gu_db_stack->processes = temp;
            }
            break;
        case 'P':               /* show process name */
            _gu_db_stack->flags |= PROCESS_ON;
            break;
        case 'r':               /* reset nesting level (do not inherit) */
            _gu_db_stack->sub_level = state->level;
            break;
        case 't':               /* enable trace, optional max depth */
            _gu_db_stack->flags |= TRACE_ON;
            if (*scan++ == ',') {
                temp = ListParse(scan);
                _gu_db_stack->maxdepth = atoi(temp->str);
                FreeList(temp);
            }
            break;
        case 'S':               /* run _sanity() on each traced call */
            _gu_db_stack->flags |= SANITY_CHECK_ON;
            break;
        }
    }
    free(new_str);
}

/*
 *  FUNCTION
 *
 *      _gu_db_pop_    pop the debug stack
 *
 *  DESCRIPTION
 *
 *      Pops the debug stack, returning the debug state to its
 *      condition prior to the most recent _gu_db_push_ invocation.
 *      Note that the pop will fail if it would remove the last
 *      valid state from the stack.  This prevents user errors
 *      in the push/pop sequence from screwing up the debugger.
 *      Maybe there should be some kind of warning printed if the
 *      user tries to pop too many states.
 *
 */

void _gu_db_pop_()
{
    register struct state *discard;

    discard = _gu_db_stack;
    /* refuse to pop the last remaining state */
    if (discard != NULL && discard->next_state != NULL) {
        _gu_db_stack = discard->next_state;
        _gu_db_fp_ = _gu_db_stack->out_file;
        _gu_db_pfp_ = _gu_db_stack->prof_file;
        if (discard->keywords != NULL) {
            FreeList(discard->keywords);
        }
        if (discard->functions != NULL) {
            FreeList(discard->functions);
        }
        if (discard->processes != NULL) {
            FreeList(discard->processes);
        }
        if (discard->p_functions != NULL) {
            FreeList(discard->p_functions);
        }
        CloseFile(discard->out_file);
        if (discard->prof_file)
            CloseFile(discard->prof_file);
        free((char *) discard);
        if (!(_gu_db_stack->flags & DEBUG_ON))
            _gu_db_on_ = 0;
    }
    else {
        /* bottom of the stack: just switch debugging off */
        if (_gu_db_stack)
            _gu_db_stack->flags &= ~DEBUG_ON;
        _gu_db_on_ = 0;
    }
}

/*
 *  FUNCTION
 *
 *      _gu_db_enter_    process entry point to user function
 *
 *  SYNOPSIS
 *
 *      VOID _gu_db_enter_ (_func_, _file_, _line_,
 *               _sfunc_, _sfile_, _slevel_, _sframep_)
 *      char *_func_;           points to current function name
 *      char *_file_;           points to current file name
 *
 *      int _line_;             called from source line number
 *      char **_sfunc_;         save previous _func_
 *      char **_sfile_;         save previous _file_
 *      int *_slevel_;          save previous nesting level
 *      char ***_sframep_;      save previous frame pointer
 *
 *  DESCRIPTION
 *
 *      Called at the beginning of each user function to tell
 *      the debugger that a new function has been entered.
 *      Note that the pointers to the previous user function
 *      name and previous user file name are stored on the
 *      caller's stack (this is why the ENTER macro must be
 *      the first "executable" code in a function, since it
 *      allocates these storage locations).  The previous nesting
 *      level is also stored on the callers stack for internal
 *      self consistency checks.
 *
 *      Also prints a trace line if tracing is enabled and
 *      increments the current function nesting depth.
 *
 *      Note that this mechanism allows the debugger to know
 *      what the current user function is at all times, without
 *      maintaining an internal stack for the function names.
 *
 */

void
_gu_db_enter_(const char *_func_, const char *_file_, uint _line_,
              const char **_sfunc_, const char **_sfile_,
              uint * _slevel_, char ***_sframep_ __attribute__ ((unused)))
{
    register CODE_STATE *state;

    if (!_gu_no_db_) {
        int save_errno = errno;     /* keep errno intact across tracing */

        state = code_state();
        /* save previous function/file on the caller's stack, install ours */
        *_sfunc_ = state->func;
        *_sfile_ = state->file;
        state->func = (char *) _func_;
        state->file = (char *) _file_;  /* BaseName takes time !! */
        *_slevel_ = ++state->level;
#ifndef THREAD
        *_sframep_ = state->framep;
        state->framep = (char **) _sframep_;
        if (DoProfile()) {
            long stackused;
            if (*state->framep == NULL) {
                stackused = 0;
            }
            else {
                /* approximate stack usage as distance between the saved
                 * frame pointer and the current one */
                stackused =
                    ((long) (*state->framep)) - ((long) (state->framep));
                stackused = stackused > 0 ?
                    stackused : -stackused;
            }
            (void) fprintf(_gu_db_pfp_, PROF_EFMT, Clock(), state->func);
#ifdef AUTOS_REVERSE
            (void) fprintf(_gu_db_pfp_, PROF_SFMT, state->framep, stackused,
                           *_sfunc_);
#else
            (void) fprintf(_gu_db_pfp_, PROF_SFMT, (ulong) state->framep,
                           stackused, state->func);
#endif
            (void) fflush(_gu_db_pfp_);
        }
#endif
        if (DoTrace(state)) {
            if (!state->locked)
                pthread_mutex_lock(&_gu_db_mutex);
            DoPrefix(_line_);
            Indent(state->level);
            (void) fprintf(_gu_db_fp_, ">%s\n", state->func);
            dbug_flush(state);      /* This does a unlock */
        }
#ifdef SAFEMALLOC
        if (_gu_db_stack->flags & SANITY_CHECK_ON)
            if (_sanity(_file_, _line_))    /* Check of my_malloc */
                _gu_db_stack->flags &= ~SANITY_CHECK_ON;
#endif
        errno = save_errno;
    }
}

/*
 *  FUNCTION
 *
 *      _gu_db_return_    process exit from user function
 *
 *  SYNOPSIS
 *
 *      VOID _gu_db_return_ (_line_, _sfunc_, _sfile_, _slevel_)
 *      int _line_;             current source line number
 *      char **_sfunc_;         where previous _func_ is to be retrieved
 *      char **_sfile_;         where previous _file_ is to be retrieved
 *      int *_slevel_;          where previous level was stashed
 *
 *  DESCRIPTION
 *
 *      Called just before user function executes an explicit or implicit
 *      return.  Prints a trace line if trace is enabled, decrements
 *      the current nesting level, and restores the current function and
 *      file names from the defunct function's stack.
 *
 */

void
_gu_db_return_(uint _line_, const char **_sfunc_, const char **_sfile_,
               uint * _slevel_)
{
    CODE_STATE *state;

    if (!_gu_no_db_) {
        int save_errno = errno;     /* keep errno intact across tracing */

        if (!(state = code_state()))
            return;                 /* Only happens at end of program */

        if (_gu_db_stack->flags & (TRACE_ON | DEBUG_ON | PROFILE_ON)) {
            if (!state->locked)
                pthread_mutex_lock(&_gu_db_mutex);
            /* nesting-level mismatch means a GU_DBUG_RETURN was skipped */
            if (state->level != (int) *_slevel_)
                (void) fprintf(_gu_db_fp_, ERR_MISSING_RETURN,
                               _gu_db_process_, state->func);
            else {
#ifdef SAFEMALLOC
                if (_gu_db_stack->flags & SANITY_CHECK_ON)
                    if (_sanity(*_sfile_, _line_))
                        _gu_db_stack->flags &= ~SANITY_CHECK_ON;
#endif
#ifndef THREAD
                if (DoProfile())
                    (void) fprintf(_gu_db_pfp_, PROF_XFMT, Clock(),
                                   state->func);
#endif
                if (DoTrace(state)) {
                    DoPrefix(_line_);
                    Indent(state->level);
                    (void) fprintf(_gu_db_fp_, "<%s\n", state->func);
                }
            }
            dbug_flush(state);      /* unlocks the mutex */
        }
        /* restore the caller's function/file/level saved by _gu_db_enter_ */
        state->level = *_slevel_ - 1;
        state->func = *_sfunc_;
        state->file = *_sfile_;
#ifndef THREAD
        if (state->framep != NULL)
            state->framep = (char **) *state->framep;
#endif
        errno = save_errno;
        code_state_cleanup(state);  /* frees per-thread state at level 0 */
    }
}

/*
 *  FUNCTION
 *
 *      _gu_db_pargs_    log arguments for subsequent use by _gu_db_doprnt_()
 *
 *  SYNOPSIS
 *
 *      VOID _gu_db_pargs_ (_line_, keyword)
 *      int _line_;
 *      char *keyword;
 *
 *  DESCRIPTION
 *
 *      The new universal printing macro GU_DBUG_PRINT, which replaces
 *      all forms of the GU_DBUG_N macros, needs two calls to runtime
 *      support routines.  The first, this function, remembers arguments
 *      that are used by the subsequent call to _gu_db_doprnt_().
* */ void _gu_db_pargs_(uint _line_, const char *keyword) { CODE_STATE *state = code_state(); state->u_line = _line_; state->u_keyword = (char *) keyword; } /* * FUNCTION * * _gu_db_doprnt_ handle print of debug lines * * SYNOPSIS * * VOID _gu_db_doprnt_ (format, va_alist) * char *format; * va_dcl; * * DESCRIPTION * * When invoked via one of the GU_DBUG macros, tests the current keyword * set by calling _gu_db_pargs_() to see if that macro has been selected * for processing via the debugger control string, and if so, handles * printing of the arguments via the format string. The line number * of the GU_DBUG macro in the source is found in u_line. * * Note that the format string SHOULD NOT include a terminating * newline, this is supplied automatically. * */ #include void _gu_db_doprnt_(const char *format, ...) { va_list args; CODE_STATE *state; state = code_state(); va_start(args, format); if (_gu_db_keyword_(state->u_keyword)) { int save_errno = errno; if (!state->locked) pthread_mutex_lock(&_gu_db_mutex); DoPrefix(state->u_line); if (TRACING) { Indent(state->level + 1); } else { (void) fprintf(_gu_db_fp_, "%s: ", state->func); } (void) fprintf(_gu_db_fp_, "%s: ", state->u_keyword); (void) vfprintf(_gu_db_fp_, format, args); va_end(args); (void) fputc('\n', _gu_db_fp_); dbug_flush(state); errno = save_errno; } va_end(args); code_state_cleanup(state); } /* * FUNCTION * * _gu_db_dump_ dump a string until '\0' is found * * SYNOPSIS * * void _gu_db_dump_ (_line_,keyword,memory,length) * int _line_; current source line number * char *keyword; * char *memory; Memory to print * int length; Bytes to print * * DESCRIPTION * Dump N characters in a binary array. * Is used to examine corrputed memory or arrays. 
 */

void
_gu_db_dump_(uint _line_, const char *keyword, const char *memory,
             uint length)
{
    int pos;
    char dbuff[90];
    CODE_STATE *state;

    state = code_state();

    if (_gu_db_keyword_((char *) keyword)) {
        if (!state->locked)
            pthread_mutex_lock(&_gu_db_mutex);
        DoPrefix(_line_);
        if (TRACING) {
            Indent(state->level + 1);
            /* NOTE(review): this pos value is dead -- it is overwritten by
             * the unconditional 'pos = 0' below; inherited from upstream. */
            pos = min(max(state->level - _gu_db_stack->sub_level, 0)
                      * INDENT, 80);
        }
        else {
            fprintf(_gu_db_fp_, "%s: ", state->func);
        }
        /* NOTE(review): sprintf into dbuff[90] is unbounded in 'keyword';
         * assumes keywords are short -- confirm. */
        sprintf(dbuff, "%s: Memory: %lx  Bytes: (%d)\n",
                keyword, (ulong) memory, length);
        (void) fputs(dbuff, _gu_db_fp_);

        pos = 0;
        /* hex dump, 3 columns per byte, wrapping at 80 columns */
        while (length-- > 0) {
            uint tmp = *((unsigned char *) memory++);
            if ((pos += 3) >= 80) {
                fputc('\n', _gu_db_fp_);
                pos = 3;
            }
            fputc(_gu_dig_vec[((tmp >> 4) & 15)], _gu_db_fp_);
            fputc(_gu_dig_vec[tmp & 15], _gu_db_fp_);
            fputc(' ', _gu_db_fp_);
        }
        (void) fputc('\n', _gu_db_fp_);
        dbug_flush(state);          /* unlocks the mutex */
    }
    code_state_cleanup(state);
}

/*
 *  FUNCTION
 *
 *      ListParse    parse list of modifiers in debug control string
 *
 *  SYNOPSIS
 *
 *      static struct link *ListParse (ctlp)
 *      char *ctlp;
 *
 *  DESCRIPTION
 *
 *      Given pointer to a comma separated list of strings in "cltp",
 *      parses the list, building a list and returning a pointer to it.
 *      The original comma separated list is destroyed in the process of
 *      building the linked list, thus it had better be a duplicate
 *      if it is important.
 *
 *      Note that since each link is added at the head of the list,
 *      the final list will be in "reverse order", which is not
 *      significant for our usage here.
* */ static struct link * ListParse(char *ctlp) { REGISTER char *start; REGISTER struct link *new_malloc; REGISTER struct link *head; head = NULL; while (*ctlp != EOS) { start = ctlp; while (*ctlp != EOS && *ctlp != ',') { ctlp++; } if (*ctlp == ',') { *ctlp++ = EOS; } new_malloc = (struct link *) DbugMalloc(sizeof(struct link)); new_malloc->str = StrDup(start); new_malloc->next_link = head; head = new_malloc; } return (head); } /* * FUNCTION * * InList test a given string for member of a given list * * SYNOPSIS * * static BOOLEAN InList (linkp, cp) * struct link *linkp; * char *cp; * * DESCRIPTION * * Tests the string pointed to by "cp" to determine if it is in * the list pointed to by "linkp". Linkp points to the first * link in the list. If linkp is NULL then the string is treated * as if it is in the list (I.E all strings are in the null list). * This may seem rather strange at first but leads to the desired * operation if no list is given. The net effect is that all * strings will be accepted when there is no list, and when there * is a list, only those strings in the list will be accepted. * */ static BOOLEAN InList(struct link *linkp, const char *cp) { REGISTER struct link *scan; REGISTER BOOLEAN result; if (linkp == NULL) { result = TRUE; } else { result = FALSE; for (scan = linkp; scan != NULL; scan = scan->next_link) { if (STREQ(scan->str, cp)) { result = TRUE; break; } } } return (result); } /* * FUNCTION * * PushState push current state onto stack and set up new one * * SYNOPSIS * * static VOID PushState () * * DESCRIPTION * * Pushes the current state on the state stack, and inits * a new state. The only parameter inherited from the previous * state is the function nesting level. This action can be * inhibited if desired, via the "r" flag. * * The state stack is a linked list of states, with the new * state added at the head. This allows the stack to grow * to the limits of memory if necessary. 
* */ static void PushState() { REGISTER struct state *new_malloc; new_malloc = (struct state *) DbugMalloc(sizeof(struct state)); new_malloc->flags = 0; new_malloc->delay = 0; new_malloc->maxdepth = MAXDEPTH; new_malloc->sub_level = 0; new_malloc->out_file = stderr; new_malloc->prof_file = (FILE *) 0; new_malloc->functions = NULL; new_malloc->p_functions = NULL; new_malloc->keywords = NULL; new_malloc->processes = NULL; new_malloc->next_state = _gu_db_stack; _gu_db_stack = new_malloc; } /* * FUNCTION * * DoTrace check to see if tracing is current enabled * * SYNOPSIS * * static BOOLEAN DoTrace (stack) * * DESCRIPTION * * Checks to see if tracing is enabled based on whether the * user has specified tracing, the maximum trace depth has * not yet been reached, the current function is selected, * and the current process is selected. Returns TRUE if * tracing is enabled, FALSE otherwise. * */ static BOOLEAN DoTrace(CODE_STATE * state) { register BOOLEAN trace = FALSE; if (TRACING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->functions, state->func) && InList(_gu_db_stack->processes, _gu_db_process_)) trace = TRUE; return (trace); } /* * FUNCTION * * DoProfile check to see if profiling is current enabled * * SYNOPSIS * * static BOOLEAN DoProfile () * * DESCRIPTION * * Checks to see if profiling is enabled based on whether the * user has specified profiling, the maximum trace depth has * not yet been reached, the current function is selected, * and the current process is selected. Returns TRUE if * profiling is enabled, FALSE otherwise. 
* */ #ifndef THREAD static BOOLEAN DoProfile() { REGISTER BOOLEAN profile; CODE_STATE *state; state = code_state(); profile = FALSE; if (PROFILING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->p_functions, state->func) && InList(_gu_db_stack->processes, _gu_db_process_)) profile = TRUE; return (profile); } #endif /* * FUNCTION * * _gu_db_keyword_ test keyword for member of keyword list * * SYNOPSIS * * BOOLEAN _gu_db_keyword_ (keyword) * char *keyword; * * DESCRIPTION * * Test a keyword to determine if it is in the currently active * keyword list. As with the function list, a keyword is accepted * if the list is null, otherwise it must match one of the list * members. When debugging is not on, no keywords are accepted. * After the maximum trace level is exceeded, no keywords are * accepted (this behavior subject to change). Additionally, * the current function and process must be accepted based on * their respective lists. * * Returns TRUE if keyword accepted, FALSE otherwise. * */ BOOLEAN _gu_db_keyword_(const char *keyword) { REGISTER BOOLEAN result; CODE_STATE *state; state = code_state(); result = FALSE; if (DEBUGGING && state->level <= _gu_db_stack->maxdepth && InList(_gu_db_stack->functions, state->func) && InList(_gu_db_stack->keywords, keyword) && InList(_gu_db_stack->processes, _gu_db_process_)) result = TRUE; return (result); } /* * FUNCTION * * Indent indent a line to the given indentation level * * SYNOPSIS * * static VOID Indent (indent) * int indent; * * DESCRIPTION * * Indent a line to the given level. Note that this is * a simple minded but portable implementation. * There are better ways. * * Also, the indent must be scaled by the compile time option * of character positions per nesting level. 
* */ static void Indent(int indent) { REGISTER int count; indent = max(indent - 1 - _gu_db_stack->sub_level, 0) * INDENT; for (count = 0; count < indent; count++) { if ((count % INDENT) == 0) fputc('|', _gu_db_fp_); else fputc(' ', _gu_db_fp_); } } /* * FUNCTION * * FreeList free all memory associated with a linked list * * SYNOPSIS * * static VOID FreeList (linkp) * struct link *linkp; * * DESCRIPTION * * Given pointer to the head of a linked list, frees all * memory held by the list and the members of the list. * */ static void FreeList(struct link *linkp) { REGISTER struct link *old; while (linkp != NULL) { old = linkp; linkp = linkp->next_link; if (old->str != NULL) { free(old->str); } free((char *) old); } } /* * FUNCTION * * StrDup make a duplicate of a string in new memory * * SYNOPSIS * * static char *StrDup (my_string) * char *string; * * DESCRIPTION * * Given pointer to a string, allocates sufficient memory to make * a duplicate copy, and copies the string to the newly allocated * memory. Failure to allocated sufficient memory is immediately * fatal. * */ static char * StrDup(const char *str) { register char *new_malloc; new_malloc = DbugMalloc((int) strlen(str) + 1); (void) strcpy(new_malloc, str); return (new_malloc); } /* * FUNCTION * * DoPrefix print debugger line prefix prior to indentation * * SYNOPSIS * * static VOID DoPrefix (_line_) * int _line_; * * DESCRIPTION * * Print prefix common to all debugger output lines, prior to * doing indentation if necessary. Print such information as * current process name, current source file name and line number, * and current function nesting depth. 
 *
 */

static void DoPrefix(uint _line_)
{
    CODE_STATE *state;

    state = code_state();
    state->lineno++;        /* running count of output lines */

    if (_gu_db_stack->flags & PID_ON) {
#ifdef THREAD
        (void) fprintf(_gu_db_fp_, "%5d:(thread %lu):",
                       (int)getpid(), (unsigned long)pthread_self());
#else
        (void) fprintf(_gu_db_fp_, "%5d: ", (int) getpid());
#endif /* THREAD */
    }
    if (_gu_db_stack->flags & NUMBER_ON) {
        (void) fprintf(_gu_db_fp_, "%5d: ", state->lineno);
    }
    if (_gu_db_stack->flags & PROCESS_ON) {
        (void) fprintf(_gu_db_fp_, "%s: ", _gu_db_process_);
    }
    if (_gu_db_stack->flags & FILE_ON) {
        (void) fprintf(_gu_db_fp_, "%14s: ", BaseName(state->file));
    }
    if (_gu_db_stack->flags & LINE_ON) {
        (void) fprintf(_gu_db_fp_, "%5d: ", _line_);
    }
    if (_gu_db_stack->flags & DEPTH_ON) {
        (void) fprintf(_gu_db_fp_, "%4d: ", state->level);
    }
}

/*
 *  FUNCTION
 *
 *      GU_DBUGOpenFile    open new output stream for debugger output
 *
 *  SYNOPSIS
 *
 *      static VOID GU_DBUGOpenFile (name)
 *      char *name;
 *
 *  DESCRIPTION
 *
 *      Given name of a new file (or "-" for stdout) opens the file
 *      and sets the output stream to the new file.
 *
 */

static void GU_DBUGOpenFile(const char *name, int append)
{
    REGISTER FILE *fp;
    REGISTER BOOLEAN newfile;

    if (name != NULL) {
        /* NOTE(review): unbounded strcpy into the fixed-size 'name' field
         * of struct state -- assumes control strings are short; confirm
         * the buffer size against the struct declaration. */
        strcpy(_gu_db_stack->name, name);
        if (strlen(name) == 1 && name[0] == '-') {
            /* "-" means stdout; always flush so output interleaves sanely */
            _gu_db_fp_ = stdout;
            _gu_db_stack->out_file = _gu_db_fp_;
            _gu_db_stack->flags |= FLUSH_ON_WRITE;
        }
        else {
            /* check writability with the real uid first (suid safety) */
            if (!Writable((char *) name)) {
                (void) fprintf(stderr, ERR_OPEN, _gu_db_process_, name);
                perror("");
                fflush(stderr);
            }
            else {
                newfile = !EXISTS(name);
                if (!(fp = fopen(name, append ?
"a+" : "w"))) {
                    (void) fprintf(stderr, ERR_OPEN, _gu_db_process_, name);
                    perror("");
                    fflush(stderr);
                }
                else {
                    _gu_db_fp_ = fp;
                    _gu_db_stack->out_file = fp;
                    if (newfile) {
                        /* give a newly created file back to the real user */
                        ChangeOwner(name);
                    }
                }
            }
        }
    }
}

/*
 *  FUNCTION
 *
 *      OpenProfile    open new output stream for profiler output
 *
 *  SYNOPSIS
 *
 *      static FILE *OpenProfile (name)
 *      char *name;
 *
 *  DESCRIPTION
 *
 *      Given name of a new file, opens the file
 *      and sets the profiler output stream to the new file.
 *
 *      It is currently unclear whether the prefered behavior is
 *      to truncate any existing file, or simply append to it.
 *      The latter behavior would be desirable for collecting
 *      accumulated runtime history over a number of separate
 *      runs.  It might take some changes to the analyzer program
 *      though, and the notes that Binayak sent with the profiling
 *      diffs indicated that append was the normal mode, but this
 *      does not appear to agree with the actual code.  I haven't
 *      investigated at this time [fnf; 24-Jul-87].
 */

#ifndef THREAD
static FILE *OpenProfile(const char *name)
{
    REGISTER FILE *fp;
    REGISTER BOOLEAN newfile;

    fp = 0;
    if (!Writable(name)) {
        (void) fprintf(_gu_db_fp_, ERR_OPEN, _gu_db_process_, name);
        perror("");
        dbug_flush(0);
        (void) Delay(_gu_db_stack->delay);
    }
    else {
        newfile = !EXISTS(name);
        if (!(fp = fopen(name, "w"))) {
            (void) fprintf(_gu_db_fp_, ERR_OPEN, _gu_db_process_, name);
            perror("");
            dbug_flush(0);
        }
        else {
            _gu_db_pfp_ = fp;
            _gu_db_stack->prof_file = fp;
            if (newfile) {
                ChangeOwner(name);
            }
        }
    }
    /* returns NULL on failure -- callers use this as a boolean */
    return fp;
}
#endif

/*
 *  FUNCTION
 *
 *      CloseFile    close the debug output stream
 *
 *  SYNOPSIS
 *
 *      static VOID CloseFile (fp)
 *      FILE *fp;
 *
 *  DESCRIPTION
 *
 *      Closes the debug output stream unless it is standard output
 *      or standard error.
* */ static void CloseFile(FILE * fp) { if (fp != stderr && fp != stdout) { if (fclose(fp) == EOF) { pthread_mutex_lock(&_gu_db_mutex); (void) fprintf(_gu_db_fp_, ERR_CLOSE, _gu_db_process_); perror(""); dbug_flush(0); } } } /* * FUNCTION * * DbugExit print error message and exit * * SYNOPSIS * * static VOID DbugExit (why) * char *why; * * DESCRIPTION * * Prints error message using current process name, the reason for * aborting (typically out of memory), and exits with status 1. * This should probably be changed to use a status code * defined in the user's debugger include file. * */ static void DbugExit(const char *why) { (void) fprintf(stderr, ERR_ABORT, _gu_db_process_, why); (void) fflush(stderr); exit(1); } /* * FUNCTION * * DbugMalloc allocate memory for debugger runtime support * * SYNOPSIS * * static long *DbugMalloc (size) * int size; * * DESCRIPTION * * Allocate more memory for debugger runtime support functions. * Failure to to allocate the requested number of bytes is * immediately fatal to the current process. This may be * rather unfriendly behavior. It might be better to simply * print a warning message, freeze the current debugger state, * and continue execution. * */ static char * DbugMalloc(int size) { register char *new_malloc; if (!(new_malloc = (char *) malloc((unsigned int) size))) DbugExit("out of memory"); return (new_malloc); } /* * As strtok but two separators in a row are changed to one * separator (to allow directory-paths in dos). 
 */

/* strtok-style tokenizer with internal static cursor: pass the string on
 * the first call, NULL to continue.  A doubled separator is collapsed to a
 * single literal separator character inside the token (DOS paths).
 * NOT reentrant/thread-safe: 'end' is shared static state. */
static char *static_strtok(char *s1, char separator)
{
    static char *end = NULL;    /* resume point across calls */
    register char *rtnval, *cpy;

    rtnval = NULL;
    if (s1 != NULL)
        end = s1;

    if (end != NULL && *end != EOS) {
        rtnval = cpy = end;
        do {
            /* copy-compacting scan: doubled separators shrink the token */
            if ((*cpy++ = *end++) == separator) {
                if (*end != separator) {
                    cpy--;      /* Point at separator */
                    break;
                }
                end++;          /* Two separators in a row, skip one */
            }
        } while (*end != EOS);
        *cpy = EOS;             /* Replace last separator */
    }
    return (rtnval);
}

/*
 *  FUNCTION
 *
 *      BaseName    strip leading pathname components from name
 *
 *  SYNOPSIS
 *
 *      static char *BaseName (pathname)
 *      char *pathname;
 *
 *  DESCRIPTION
 *
 *      Given pointer to a complete pathname, locates the base file
 *      name at the end of the pathname and returns a pointer to
 *      it.
 *
 */

static char *BaseName(const char *pathname)
{
    register const char *base;

    base = strrchr(pathname, FN_LIBCHAR);
    // if (base++ == NullS) - this doesn't make sense
    /* NOTE(review): unlike the classic dbug BaseName, this returns a
     * pointer AT the last separator (so the result still starts with the
     * separator char), falling back to the whole path when there is no
     * separator or it is the final character.  Looks intentional per the
     * comment above -- confirm before "fixing". */
    if (NULL == base || '\0' == base[1])
        base = pathname;

    return ((char *) base);
}

/*
 *  FUNCTION
 *
 *      Writable    test to see if a pathname is writable/creatable
 *
 *  SYNOPSIS
 *
 *      static BOOLEAN Writable (pathname)
 *      char *pathname;
 *
 *  DESCRIPTION
 *
 *      Because the debugger might be linked in with a program that
 *      runs with the set-uid-bit (suid) set, we have to be careful
 *      about opening a user named file for debug output.  This consists
 *      of checking the file for write access with the real user id,
 *      or checking the directory where the file will be created.
 *
 *      Returns TRUE if the user would normally be allowed write or
 *      create access to the named file.  Returns FALSE otherwise.
 *
 */

#ifndef Writable
static BOOLEAN Writable(char *pathname)
{
    REGISTER BOOLEAN granted;
    REGISTER char *lastslash;

    granted = FALSE;
    if (EXISTS(pathname)) {
        if (WRITABLE(pathname)) {
            granted = TRUE;
        }
    }
    else {
        /* file does not exist: check the containing directory instead;
         * the path is temporarily truncated in place and then restored */
        lastslash = strrchr(pathname, '/');
        if (lastslash != NULL) {
            *lastslash = EOS;
        }
        else {
            pathname = ".";
        }
        if (WRITABLE(pathname)) {
            granted = TRUE;
        }
        if (lastslash != NULL) {
            *lastslash = '/';
        }
    }
    return (granted);
}
#endif

/*
 *  FUNCTION
 *
 *      ChangeOwner    change owner to real user for suid programs
 *
 *  SYNOPSIS
 *
 *      static VOID ChangeOwner (pathname)
 *
 *  DESCRIPTION
 *
 *      For unix systems, change the owner of the newly created debug
 *      file to the real owner.  This is strictly for the benefit of
 *      programs that are running with the set-user-id bit set.
 *
 *      Note that at this point, the fact that pathname represents
 *      a newly created file has already been established.  If the
 *      program that the debugger is linked to is not running with
 *      the suid bit set, then this operation is redundant (but
 *      harmless).
 *
 */

#ifndef ChangeOwner
static void ChangeOwner(char *pathname)
{
    /* hand ownership of the freshly created file to the real uid/gid */
    if (chown(pathname, getuid(), getgid()) == -1) {
        (void) fprintf(stderr, ERR_CHOWN, _gu_db_process_, pathname);
        perror("");
        (void) fflush(stderr);
    }
}
#endif

/*
 *  FUNCTION
 *
 *      _gu_db_setjmp_    save debugger environment
 *
 *  SYNOPSIS
 *
 *      VOID _gu_db_setjmp_ ()
 *
 *  DESCRIPTION
 *
 *      Invoked as part of the user's GU_DBUG_SETJMP macro to save
 *      the debugger environment in parallel with saving the user's
 *      environment.
 *
 */

#ifdef HAVE_LONGJMP
void _gu_db_setjmp_()
{
    CODE_STATE *state;
    state = code_state();

    /* snapshot the nesting level and location for a later longjmp */
    state->jmplevel = state->level;
    state->jmpfunc = state->func;
    state->jmpfile = state->file;
}

/*
 *  FUNCTION
 *
 *      _gu_db_longjmp_    restore previously saved debugger environment
 *
 *  SYNOPSIS
 *
 *      VOID _gu_db_longjmp_ ()
 *
 *  DESCRIPTION
 *
 *      Invoked as part of the user's GU_DBUG_LONGJMP macro to restore
 *      the debugger environment in parallel with restoring the user's
 *      previously saved environment.
 *
 */

void _gu_db_longjmp_()
{
    CODE_STATE *state;
    state = code_state();

    /* restore the nesting level/location saved by _gu_db_setjmp_ */
    state->level = state->jmplevel;
    if (state->jmpfunc) {
        state->func = state->jmpfunc;
    }
    if (state->jmpfile) {
        state->file = state->jmpfile;
    }
}
#endif

/*
 *  FUNCTION
 *
 *      DelayArg    convert D flag argument to appropriate value
 *
 *  SYNOPSIS
 *
 *      static int DelayArg (value)
 *      int value;
 *
 *  DESCRIPTION
 *
 *      Converts delay argument, given in tenths of a second, to the
 *      appropriate numerical argument used by the system to delay
 *      that that many tenths of a second.  For example, on the
 *      amiga, there is a system call "Delay()" which takes an
 *      argument in ticks (50 per second).  On unix, the sleep
 *      command takes seconds.  Thus a value of "10", for one
 *      second of delay, gets converted to 50 on the amiga, and 1
 *      on unix.  Other systems will need to use a timing loop.
 *
 */

#ifdef AMIGA
#define HZ (50)                 /* Probably in some header somewhere */
#endif

static int DelayArg(int value)
{
    uint delayarg = 0;

#if (unix || xenix)
    delayarg = value / 10;      /* Delay is in seconds for sleep () */
#endif
#ifdef AMIGA
    delayarg = (HZ * value) / 10;   /* Delay in ticks for Delay () */
#endif
    return (delayarg);
}

/*
 * A dummy delay stub for systems that do not support delays.
 * With a little work, this can be turned into a timing loop.
 */

#if ! defined(Delay) && ! defined(AMIGA)
static int Delay(int ticks)
{
    return ticks;   /* no-op: delays unsupported on this platform */
}
#endif

/*
 *  FUNCTION
 *
 *      perror    perror simulation for systems that don't have it
 *
 *  SYNOPSIS
 *
 *      static VOID perror (s)
 *      char *s;
 *
 *  DESCRIPTION
 *
 *      Perror produces a message on the standard error stream which
 *      provides more information about the library or system error
 *      just encountered.  The argument string s is printed, followed
 *      by a ':', a blank, and then a message and a newline.
 *
 *      An undocumented feature of the unix perror is that if the string
 *      's' is a null string (NOT a NULL pointer!), then the ':' and
 *      blank are not printed.
 *
 *      This version just complains about an "unknown system error".
* */ /* flush dbug-stream, free mutex lock & wait delay */ /* This is because some systems (MSDOS!!) dosn't flush fileheader */ /* and dbug-file isn't readable after a system crash !! */ static void dbug_flush(CODE_STATE * state) { #ifndef THREAD if (_gu_db_stack->flags & FLUSH_ON_WRITE) #endif { #if defined(MSDOS) || defined(__WIN__) if (_gu_db_fp_ != stdout && _gu_db_fp_ != stderr) { if (!(freopen(_gu_db_stack->name, "a", _gu_db_fp_))) { (void) fprintf(stderr, ERR_OPEN, _gu_db_process_,_gu_db_stack->name); fflush(stderr); _gu_db_fp_ = stdout; _gu_db_stack->out_file = _gu_db_fp_; _gu_db_stack->flags |= FLUSH_ON_WRITE; } } else #endif { (void) fflush(_gu_db_fp_); if (_gu_db_stack->delay) (void) Delay(_gu_db_stack->delay); } } if (!state || !state->locked) pthread_mutex_unlock(&_gu_db_mutex); } /* dbug_flush */ void _gu_db_lock_file() { CODE_STATE *state; state = code_state(); pthread_mutex_lock(&_gu_db_mutex); state->locked = 1; } void _gu_db_unlock_file() { CODE_STATE *state; state = code_state(); state->locked = 0; pthread_mutex_unlock(&_gu_db_mutex); } /* * Here we need the definitions of the clock routine. Add your * own for whatever system that you have. */ #ifndef THREAD #if defined(HAVE_GETRUSAGE) #include #include /* extern int getrusage(int, struct rusage *); */ /* * Returns the user time in milliseconds used by this process so * far. 
*/ static unsigned long Clock() { struct rusage ru; (void) getrusage(RUSAGE_SELF, &ru); return ((ru.ru_utime.tv_sec * 1000) + (ru.ru_utime.tv_usec / 1000)); } #elif defined(MSDOS) || defined(__WIN__) || defined(OS2) static ulong Clock() { return clock() * (1000 / Cmy_pthread_mutex_lockS_PER_SEC); } #elif defined (amiga) struct DateStamp { /* Yes, this is a hack, but doing it right */ long ds_Days; /* is incredibly ugly without splitting this */ long ds_Minute; /* off into a separate file */ long ds_Tick; }; static int first_clock = TRUE; static struct DateStamp begin; static struct DateStamp elapsed; static unsigned long Clock() { register struct DateStamp *now; register unsigned long millisec = 0; extern VOID *AllocMem(); now = (struct DateStamp *) AllocMem((long) sizeof(struct DateStamp), 0L); if (now != NULL) { if (first_clock == TRUE) { first_clock = FALSE; (void) DateStamp(now); begin = *now; } (void) DateStamp(now); millisec = 24 * 3600 * (1000 / HZ) * (now->ds_Days - begin.ds_Days); millisec += 60 * (1000 / HZ) * (now->ds_Minute - begin.ds_Minute); millisec += (1000 / HZ) * (now->ds_Tick - begin.ds_Tick); (void) FreeMem(now, (long) sizeof(struct DateStamp)); } return (millisec); } #else static unsigned long Clock() { return (0); } #endif /* RUSAGE */ #endif /* THREADS */ #ifdef NO_VARARGS /* * Fake vfprintf for systems that don't support it. If this * doesn't work, you are probably SOL... 
*/ static int vfprintf(stream, format, ap) FILE *stream; char *format; va_list ap; { int rtnval; ARGS_DCL; ARG0 = va_arg(ap, ARGS_TYPE); ARG1 = va_arg(ap, ARGS_TYPE); ARG2 = va_arg(ap, ARGS_TYPE); ARG3 = va_arg(ap, ARGS_TYPE); ARG4 = va_arg(ap, ARGS_TYPE); ARG5 = va_arg(ap, ARGS_TYPE); ARG6 = va_arg(ap, ARGS_TYPE); ARG7 = va_arg(ap, ARGS_TYPE); ARG8 = va_arg(ap, ARGS_TYPE); ARG9 = va_arg(ap, ARGS_TYPE); rtnval = fprintf(stream, format, ARGS_LIST); return (rtnval); } #endif /* NO_VARARGS */ char _gu_dig_vec[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; galera-4-26.4.25/galerautils/src/gu_assert.h000644 000164 177776 00000001042 15107057155 021761 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy /** * @file Assert macro definition * * $Id$ */ #ifndef _gu_assert_h_ #define _gu_assert_h_ #include "gu_log.h" #ifndef DEBUG_ASSERT #include #else #include #undef assert /** Assert that sleeps instead of aborting the program, saving it for gdb */ #define assert(expr) if (!(expr)) { \ gu_fatal ("Assertion (%s) failed", __STRING(expr)); \ while(1) sleep(1); } #endif /* DEBUG_ASSERT */ #endif /* _gu_assert_h_ */ galera-4-26.4.25/galerautils/src/gu_mmap.cpp000644 000164 177776 00000006673 15107057155 021764 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy * * $Id$ */ #include "gu_mmap.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include "gu_macros.hpp" #include "gu_limits.h" // GU_PAGE_SIZE #include #include #if defined(__FreeBSD__) && defined(MAP_NORESERVE) /* FreeBSD has never implemented this flags and will deprecate it. 
*/ #undef MAP_NORESERVE #endif #ifndef MAP_NORESERVE #define MAP_NORESERVE 0 #endif // to avoid -Wold-style-cast extern "C" { static const void* const GU_MAP_FAILED = MAP_FAILED; } namespace gu { MMap::MMap (const FileDescriptor& fd, bool const sequential) : size (fd.size()), ptr (mmap (NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NORESERVE, fd.get(), 0)), mapped (ptr != GU_MAP_FAILED) { if (!mapped) { gu_throw_system_error(errno) << "mmap() on '" << fd.name() << "' failed"; } #if defined(MADV_DONTFORK) if (posix_madvise (ptr, size, MADV_DONTFORK)) { # define MMAP_INHERIT_OPTION "MADV_DONTFORK" #elif defined(__FreeBSD__) if (minherit (ptr, size, INHERIT_NONE)) { # define MMAP_INHERIT_OPTION "INHERIT_NONE" #endif #if defined(MMAP_INHERIT_OPTION) int const err(errno); log_warn << "Failed to set " MMAP_INHERIT_OPTION " on " << fd.name() << ": " << err << " (" << strerror(err) << ")"; } #endif /* benefits are questionable */ if (sequential && posix_madvise (ptr, size, MADV_SEQUENTIAL)) { int const err(errno); log_warn << "Failed to set MADV_SEQUENTIAL on " << fd.name() << ": " << err << " (" << strerror(err) << ")"; } log_debug << "Memory mapped: " << ptr << " (" << size << " bytes)"; } void MMap::dont_need() const { if (posix_madvise(reinterpret_cast(ptr), size, MADV_DONTNEED)) { log_warn << "Failed to set MADV_DONTNEED on " << ptr << ": " << errno << " (" << strerror(errno) << ')'; } } void MMap::sync(void* const addr, size_t const length) const { /* libc msync() only accepts addresses multiple of page size, * rounding down */ static uint64_t const PAGE_SIZE_MASK(~(GU_PAGE_SIZE - 1)); uint8_t* const sync_addr(reinterpret_cast (uint64_t(addr) & PAGE_SIZE_MASK)); size_t const sync_length (length + (static_cast(addr) - sync_addr)); if (::msync(sync_addr, sync_length, MS_SYNC) < 0) { gu_throw_system_error(errno) << "msync(" << sync_addr << ", " << sync_length << ") failed"; } } void MMap::sync () const { log_info << "Flushing memory map to disk..."; sync(ptr, size); 
} void MMap::unmap () { // Do logging before munmap because it makes ptr invalid memory on success log_debug << "Unmapping memory: " << ptr << "(" << size <<" bytes)"; if (munmap (ptr, size) < 0) { gu_throw_system_error(errno) << "munmap(" << ptr << ", " << size << ") failed"; } mapped = false; } MMap::~MMap () { if (mapped) { try { unmap(); } catch (Exception& e) { log_error << e.what(); } } } } galera-4-26.4.25/galerautils/src/gu_fnv.h000644 000164 177776 00000013045 15107057155 021257 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /*! * @file * * This header file defines FNV hash functions for 3 hash sizes: * 4, 8 and 16 bytes. * * Be wary of bitshift multiplication "optimization" (FNV_BITSHIFT_OPTIMIZATION): * FNV authors used to claim marginal speedup when using it, however on core2 * CPU it has shown no speedup for fnv32a and more than 2x slowdown for fnv64a * and fnv128a. Disabled by default. * * FNV vs. FNVa: FNVa has a better distribution: multiplication happens after * XOR and hence propagates XOR effect to all bytes of the hash. * Hence by default functions perform FNVa. GU_FNV_NORMAL macro * is needed for unit tests. * * gu_fnv*_internal() functions are endian-unsafe, their output should be * converted to little-endian format if it is to be exported to other machines. 
*/ #ifndef _gu_fnv_h_ #define _gu_fnv_h_ #include "gu_int128.h" #include #include // ssize_t #include #define GU_FNV32_PRIME 16777619UL #define GU_FNV32_SEED 2166136261UL #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV32_MUL(_x) _x *= GU_FNV32_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV32_MUL(_x) \ _x += (_x << 1) + (_x << 4) + (_x << 7) + (_x << 8) + (_x << 24) #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #if !defined(GU_FNV_NORMAL) # define GU_FNV32_ITERATION(_s,_b) _s ^= _b; GU_FNV32_MUL(_s); #else # define GU_FNV32_ITERATION(_s,_b) GU_FNV32_MUL(_s); _s ^= _b; #endif static GU_FORCE_INLINE void gu_fnv32a_internal (const void* buf, ssize_t const len, uint32_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; while (bp + 2 <= be) { GU_FNV32_ITERATION(*seed,*bp++); GU_FNV32_ITERATION(*seed,*bp++); } if (bp < be) { GU_FNV32_ITERATION(*seed,*bp++); } assert(be == bp); } #define GU_FNV64_PRIME 1099511628211ULL #define GU_FNV64_SEED 14695981039346656037ULL #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV64_MUL(_x) _x *= GU_FNV64_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV64_MUL(_x) \ _x +=(_x << 1) + (_x << 4) + (_x << 5) + (_x << 7) + (_x << 8) + (_x << 40); #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #if !defined(GU_FNV_NORMAL) # define GU_FNV64_ITERATION(_s,_b) _s ^= _b; GU_FNV64_MUL(_s); #else # define GU_FNV64_ITERATION(_s,_b) GU_FNV64_MUL(_s); _s ^= _b; #endif static GU_FORCE_INLINE void gu_fnv64a_internal (const void* buf, ssize_t const len, uint64_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; while (bp + 2 <= be) { GU_FNV64_ITERATION(*seed,*bp++); GU_FNV64_ITERATION(*seed,*bp++); } if (bp < be) { GU_FNV64_ITERATION(*seed,*bp++); } assert(be == bp); } static gu_uint128_t const GU_SET128(GU_FNV128_PRIME, 0x0000000001000000ULL, 0x000000000000013BULL); static gu_uint128_t const GU_SET128(GU_FNV128_SEED, 0x6C62272E07BB0142ULL, 
0x62B821756295C58DULL); #if defined(__SIZEOF_INT128__) #define GU_FNV128_XOR(_s,_b) _s ^= _b #if !defined(GU_FNVBITSHIFT_OPTIMIZATION) # define GU_FNV128_MUL(_x) _x *= GU_FNV128_PRIME #else /* GU_FNVBITSHIFT_OPTIMIZATION */ # define GU_FNV128_MUL(_x) \ _x +=(_x << 1) + (_x << 3) + (_x << 4) + (_x << 5) + (_x << 8) + (_x << 88); #endif /* GU_FNVBITSHIFT_OPTIMIZATION */ #else /* ! __SIZEOF_INT128__ */ #define GU_FNV128_XOR(_s,_b) (_s).u32[GU_32LO] ^= _b #if defined(GU_FNV128_FULL_MULTIPLICATION) # define GU_FNV128_MUL(_x) GU_MUL128_INPLACE(_x, GU_FNV128_PRIME) #else /* no FULL_MULTIPLICATION */ # define GU_FNV128_MUL(_x) { \ uint32_t carry = \ (((_x).u64[GU_64LO] & 0x00000000ffffffffULL) * 0x013b) >> 32; \ carry = (((_x).u64[GU_64LO] >> 32) * 0x013b + carry) >> 32; \ (_x).u64[GU_64HI] *= 0x013b; \ (_x).u64[GU_64HI] += ((_x).u64[GU_64LO] << 24) + carry; \ (_x).u64[GU_64LO] *= 0x013b; \ } #endif /* FULL_MULTIPLICATION */ #endif /* ! __SIZEOF_INT128__ */ #if !defined(GU_FNV_NORMAL) # define GU_FNV128_ITERATION(_s,_b) GU_FNV128_XOR(_s,_b); GU_FNV128_MUL(_s); #else # define GU_FNV128_ITERATION(_s,_b) GU_FNV128_MUL(_s); GU_FNV128_XOR(_s,_b); #endif inline static void gu_fnv128a_internal (const void* buf, ssize_t const len, gu_uint128_t* seed) { const uint8_t* bp = (const uint8_t*)buf; const uint8_t* const be = bp + len; /* this manual loop unrolling seems to be essential */ while (bp + 8 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp + 4 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp + 2 <= be) { GU_FNV128_ITERATION(*seed, *bp++); GU_FNV128_ITERATION(*seed, *bp++); } if (bp < be) { GU_FNV128_ITERATION(*seed, *bp++); } 
assert(be == bp); } #endif /* _gu_fnv_h_ */ galera-4-26.4.25/galerautils/src/gu_time.c000644 000164 177776 00000014422 15107057155 021417 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file time manipulation functions/macros * * $Id: $ */ #if defined(__APPLE__) #include #include // struct timespec #include // gettimeofday #include // clock_get_time #include // host_get_clock_service #include // mach_absolute_time, mach_timebase_info #include #include "gu_time.h" #define NSEC_PER_SEC 1000000000 #define NSEC_PER_USEC 1000 # if defined(__LP64__) // OS X comm page time offsets // see http://www.opensource.apple.com/source/xnu/xnu-2050.22.13/osfmk/i386/cpu_capabilities.h #define nt_tsc_base "0x50" #define nt_scale "0x58" #define nt_shift "0x5c" #define nt_ns_base "0x60" #define nt_generation "0x68" #define gtod_generation "0x6c" #define gtod_ns_base "0x70" #define gtod_sec_base "0x78" static inline int64_t nanotime (void) { int64_t ntime; __asm volatile ( "mov $0x7fffffe00000, %%rbx;" /* comm page base */ "0:" /* Loop trying to take a consistent snapshot of the time parameters. */ "movl "gtod_generation"(%%rbx), %%r8d;" "testl %%r8d, %%r8d;" "jz 1f;" "movl "nt_generation"(%%rbx), %%r9d;" "testl %%r9d, %%r9d;" "jz 0b;" "rdtsc;" "movq "nt_tsc_base"(%%rbx), %%r10;" "movl "nt_scale"(%%rbx), %%r11d;" "movq "nt_ns_base"(%%rbx), %%r12;" "cmpl "nt_generation"(%%rbx), %%r9d;" "jne 0b;" "movq "gtod_ns_base"(%%rbx), %%r13;" "movq "gtod_sec_base"(%%rbx), %%r14;" "cmpl "gtod_generation"(%%rbx), %%r8d;" "jne 0b;" /* Gathered all the data we need. Compute time. */ /* ((tsc - nt_tsc_base) * nt_scale) >> 32 + nt_ns_base - gtod_ns_base + gtod_sec_base*1e9 */ /* The multiply and shift extracts the top 64 bits of the 96-bit product. 
*/ "shlq $32, %%rdx;" "addq %%rdx, %%rax;" "subq %%r10, %%rax;" "mulq %%r11;" "shrdq $32, %%rdx, %%rax;" "addq %%r12, %%rax;" "subq %%r13, %%rax;" "imulq $1000000000, %%r14;" "addq %%r14, %%rax;" "jmp 2f;" "1:" /* Fall back to system call (usually first call in this thread). */ "movq %%rsp, %%rdi;" /* rdi must be non-nil, unused */ "movq $0, %%rsi;" "movl $(0x2000000+116), %%eax;" /* SYS_gettimeofday */ "syscall; /* may destroy rcx and r11 */" /* sec is in rax, usec in rdx */ /* return nsec in rax */ "imulq $1000000000, %%rax;" "imulq $1000, %%rdx;" "addq %%rdx, %%rax;" "2:" : "=a"(ntime) : /* no input parameters */ : "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14" ); return ntime; } static inline int64_t nanouptime (void) { int64_t ntime; __asm volatile ( "movabs $0x7fffffe00000, %%rbx;" /* comm page base */ "0:" /* Loop trying to take a consistent snapshot of the time parameters. */ "movl "nt_generation"(%%rbx), %%r9d;" "testl %%r9d, %%r9d;" "jz 0b;" "rdtsc;" "movq "nt_tsc_base"(%%rbx), %%r10;" "movl "nt_scale"(%%rbx), %%r11d;" "movq "nt_ns_base"(%%rbx), %%r12;" "cmpl "nt_generation"(%%rbx), %%r9d;" "jne 0b;" /* Gathered all the data we need. Compute time. */ /* ((tsc - nt_tsc_base) * nt_scale) >> 32 + nt_ns_base */ /* The multiply and shift extracts the top 64 bits of the 96-bit product. 
*/ "shlq $32, %%rdx;" "addq %%rdx, %%rax;" "subq %%r10, %%rax;" "mulq %%r11;" "shrdq $32, %%rdx, %%rax;" "addq %%r12, %%rax;" : "=a"(ntime) : /* no input parameters */ : "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "%r9", "%r10", "%r11", "%r12" ); return ntime; } int clock_gettime (clockid_t clk_id, struct timespec * tp) { int64_t abstime = 0; if (tp == NULL) { return EFAULT; } switch (clk_id) { case CLOCK_REALTIME: abstime = nanotime (); break; case CLOCK_MONOTONIC: abstime = nanouptime (); break; default: errno = EINVAL; return -1; } tp->tv_sec = abstime / (uint64_t)NSEC_PER_SEC; tp->tv_nsec = (uint32_t)(abstime % (uint64_t)NSEC_PER_SEC); return 0; } #else /* !__LP64__ */ static struct mach_timebase_info g_mti; int clock_gettime (clockid_t clk_id, struct timespec * tp) { int64_t abstime = 0; mach_timebase_info_data_t mti; /* {uint32_t numer, uint32_t denom} */ if (tp == NULL) { return EFAULT; } switch (clk_id) { case CLOCK_REALTIME: struct timeval tv; if (gettimeofday (&tv, NULL) != 0) { return -1; } tp->tv_sec = tv.tv_sec; tp->tv_nsec = tv.tv_usec * NSEC_PER_USEC; return 0; case CLOCK_MONOTONIC: abstime = mach_absolute_time (); break; default: errno = EINVAL; return -1; } if (g_mti.denom == 0) { struct mach_timebase_info mti; mach_timebase_info (&mti); g_mti.numer = mti.numer; OSMemoryBarrier (); g_mti.denom = mti.denom; } nanos = (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom))); tp->tv_sec = nanos / (uint64_t)NSEC_PER_SEC; tp->tv_nsec = (uint32_t)(nanos % (uint64_t)NSEC_PER_SEC); return 0; } #endif /* !__LP64__ */ #else /* !__APPLE__ */ #ifdef __GNUC__ // error: ISO C forbids an empty translation unit int dummy_var_gu_time; #endif #endif /* __APPLE__ */ galera-4-26.4.25/galerautils/src/gu_alloc.hpp000644 000164 177776 00000012361 15107057155 022120 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2016 Codership Oy */ /*! 
* @file Continuous buffer allocator for RecordSet * * $Id$ */ #ifndef _GU_ALLOC_HPP_ #define _GU_ALLOC_HPP_ #include "gu_string.hpp" #include "gu_fdesc.hpp" #include "gu_mmap.hpp" #include "gu_buf.hpp" #include "gu_vector.hpp" #include "gu_macros.h" // gu_likely() #include // realloc(), free() #include #include namespace gu { class Allocator { public: class BaseName { public: virtual void print(std::ostream& os) const = 0; virtual ~BaseName() {} }; // this questionable optimization reduces Allocator size by 8 // probably not worth the loss of generality. typedef unsigned int page_size_type; // max page size typedef page_size_type heap_size_type; // max heap store size explicit Allocator (const BaseName& base_name = BASE_NAME_DEFAULT, void* reserved = NULL, page_size_type reserved_size = 0, heap_size_type max_heap = (1U << 22), /* 4M */ page_size_type disk_page_size = (1U << 26)); /* 64M */ ~Allocator (); /*! @param new_page - true if not adjucent to previous allocation */ byte_t* alloc (page_size_type const size, bool& new_page); /* Total allocated size */ size_t size () const { return size_; } /* Total count of pages */ size_t count() const { return pages_->size(); } #ifdef GU_ALLOCATOR_DEBUG /* appends own vector of Buf structures to the passed one, * should be called only after all allocations have been made. * returns sum of all appended buffers' sizes (same as size()) */ size_t gather (std::vector& out) const; #endif /* GU_ALLOCATOR_DEBUG */ /* After we allocated 3 heap pages, spilling vector into heap should not * be an issue. 
*/ static size_t const INITIAL_VECTOR_SIZE = 4; private: class Page /* base class for memory and file pages */ { public: Page (void* ptr, size_t size) : base_ptr_(static_cast(ptr)), ptr_ (base_ptr_), left_ (size) {} virtual ~Page() {}; byte_t* alloc (size_t size) { byte_t* ret = NULL; if (gu_likely(size <= left_)) { ret = ptr_; ptr_ += size; left_ -= size; } return ret; } const byte_t* base() const { return base_ptr_; } ssize_t size() const { return ptr_ - base_ptr_; } protected: byte_t* base_ptr_; byte_t* ptr_; page_size_type left_; Page& operator=(const Page&); Page (const Page&); }; class HeapPage : public Page { public: HeapPage (page_size_type max_size); ~HeapPage () { free (base_ptr_); } }; class FilePage : public Page { public: FilePage (const std::string& name, page_size_type size); ~FilePage () { fd_.unlink(); } private: FileDescriptor fd_; MMap mmap_; }; class PageStore { public: Page* new_page (page_size_type size) { return my_new_page(size); } protected: virtual ~PageStore() {} private: virtual Page* my_new_page (page_size_type size) = 0; }; class HeapStore : public PageStore { public: HeapStore (heap_size_type max) : PageStore(), left_(max) {} ~HeapStore () {} private: heap_size_type left_; Page* my_new_page (page_size_type const size); }; class FileStore : public PageStore { public: FileStore (const BaseName& base_name, page_size_type page_size) : PageStore(), base_name_(base_name), page_size_(page_size), n_ (0) {} ~FileStore() {} const BaseName& base_name() const { return base_name_; } int size() const { return n_; } private: const BaseName& base_name_; page_size_type const page_size_; int n_; Page* my_new_page (page_size_type const size); FileStore (const FileStore&); FileStore& operator= (const FileStore&); }; Page first_page_; Page* current_page_; HeapStore heap_store_; FileStore file_store_; PageStore* current_store_; gu::Vector pages_; #ifdef GU_ALLOCATOR_DEBUG gu::Vector bufs_; void add_current_to_bufs(); #endif /* GU_ALLOCATOR_DEBUG */ size_t 
size_; Allocator(const gu::Allocator&); const Allocator& operator=(const gu::Allocator&); class BaseNameDefault : public BaseName { public: BaseNameDefault() {} // this is seemingly required by the standard void print(std::ostream& os) const { os << "alloc"; } }; static BaseNameDefault const BASE_NAME_DEFAULT; }; /* class Allocator */ inline std::ostream& operator<< (std::ostream& os, const Allocator::BaseName& bn) { bn.print(os); return os; } } /* namespace gu */ #endif /* _GU_ALLOC_HPP_ */ galera-4-26.4.25/galerautils/src/gu_arch.h000644 000164 177776 00000003667 15107057155 021414 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2017 Codership Oy /** * @file CPU architecture related functions/macros * * $Id$ */ #ifndef _gu_arch_h_ #define _gu_arch_h_ #if defined(HAVE_ENDIAN_H) # include #elif defined(HAVE_SYS_ENDIAN_H) /* FreeBSD */ # include #elif defined(HAVE_SYS_BYTEORDER_H) # include #elif defined(__APPLE__) # include #else # error "No byte order header file detected" #endif #if defined(__BYTE_ORDER) # if __BYTE_ORDER == __LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(_BYTE_ORDER) /* FreeBSD */ # if _BYTE_ORDER == _LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(__APPLE__) && defined(__DARWIN_BYTE_ORDER) # if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN # define GU_LITTLE_ENDIAN # endif #elif defined(__sun__) # if !defined(_BIG_ENDIAN) # define GU_LITTLE_ENDIAN # endif #else # error "Byte order not defined" #endif #if defined(__sun__) # if defined (_LP64) # define GU_WORDSIZE 64 # else # define GU_WORDSIZE 32 # endif #elif defined(__APPLE__) || defined(__FreeBSD__) # include # define GU_WORDSIZE __WORDSIZE #else # include # define GU_WORDSIZE __WORDSIZE #endif #include #if (GU_WORDSIZE == 32) typedef uint32_t gu_word_t; #elif (GU_WORDSIZE == 64) typedef uint64_t gu_word_t; #else # error "Unsupported wordsize" #endif #define GU_WORD_BYTES sizeof(gu_word_t) #include #ifdef __cpluplus // to avoid "old-style cast" in C++ 
make it temp instantiation #define GU_ASSERT_ALIGNMENT(x) \ assert((uintptr_t(&(x)) % sizeof(x)) == 0 || \ (uintptr_t(&(x)) % GU_WORD_BYTES) == 0) #else // ! __cplusplus #define GU_ASSERT_ALIGNMENT(x) \ assert(((uintptr_t)(&(x)) % sizeof(x)) == 0 || \ ((uintptr_t)(&(x)) % GU_WORD_BYTES) == 0) #endif // !__cplusplus #endif /* _gu_arch_h_ */ galera-4-26.4.25/galerautils/src/gu_vector.hpp000644 000164 177776 00000012416 15107057155 022331 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2019 Codership Oy /*! * @file implementation of STL vector functionality "on the stack", that is * with initial buffer for allocations reserved inside the object: * * gu::Vector v; * v().resize(5); // uses internal buffer (in this case on the stack) * v().resize(20); // overflows into heap * * In many cases, when the number of elements in a vector is predictably low or * even known exactly, this will save us from going to heap just to allocate * few elements. * * Rather than manually rewriting all std::vector methods, we return * a reference to std::vector object via operator(). operator[] is also * rewritten to provide the familiar v[i] interface. 
* * $Id$ */ #ifndef _GU_VECTOR_HPP_ #define _GU_VECTOR_HPP_ #include "gu_reserved_container.hpp" #include namespace gu { /* gu::VectorBase is an interface to generalize gu::Vector template over * capacity so that it is possible to pass gu::Vector objects * by reference to gu::VectorBase */ template class VectorBase { public: typedef T value_type; typedef T& reference; typedef const T& const_reference; typedef typename ReservedAllocator::size_type size_type; virtual reference operator[] (size_type i) = 0; virtual const_reference operator[] (size_type i) const = 0; virtual size_type size () const = 0; virtual void reserve (size_type n) = 0; virtual void resize (size_type n, value_type val = value_type()) = 0; virtual void push_back(const value_type& val) = 0; reference front() { return operator[](0); } const_reference front() const { return operator[](0); } reference back () { return operator[](size() - 1); } const_reference back () const { return operator[](size() - 1); } // Now iterators, which I have no time for ATM. Leaving unfinished. 
protected: VectorBase() {} virtual ~VectorBase() {} }; /* a base class to be used as a member of other classes */ template ::size_type capacity> class Vector { public: typedef typename VectorBase::size_type size_type; typedef typename VectorBase::value_type value_type; Vector() : rv_() {} Vector(const Vector& other) : rv_() { rv_().assign(other().begin(), other().end()); } Vector& operator= (Vector other) { using namespace std; swap(other); return *this; } typedef ReservedAllocator Allocator; typedef std::vector ContainerType; ContainerType& operator() () { return container(); } const ContainerType& operator() () const { return container(); } ContainerType* operator-> () { return &container(); } const ContainerType* operator-> () const { return &container(); } T& operator[] (size_type i) { return container()[i]; } const T& operator[] (size_type i) const { return container()[i]; } size_type size () const { return container().size(); } void reserve (size_type n) { container().reserve(n); } void resize (size_type n, value_type val = value_type()) { container().resize(n, val); } void push_back (const value_type& val) { container().push_back(val); }; T& front() { return container().front(); } const T& front() const { return container().front(); } T& back () { return container().back(); } const T& back () const { return container().back(); } /* this mehtod must be specialized for each template instantiation */ size_type serialize(void* buf, size_type size, size_type offset = 0); bool in_heap() const // for testing { return (rv_.reserved_buffer() != &rv_.container()[0]); } private: ReservedContainer rv_; ContainerType& container() { return rv_.container(); } const ContainerType& container() const { return rv_.container(); } }; /* class Vector*/ /* Vector class derived from VectorBase - to be passed as a parameter */ template ::size_type capacity> class VectorDerived : public VectorBase { public: typedef typename VectorBase::size_type size_type; typedef typename 
VectorBase::value_type value_type; typedef typename VectorBase::reference reference; typedef typename VectorBase::const_reference const_reference; VectorDerived() : VectorBase(), v_() {} template ::size_type C> VectorDerived(const Vector& other) : VectorBase(), v_() { v_().assign(other().begin(), other().end()); } reference operator[] (size_type i) { return v_[i]; } const_reference operator[] (size_type i) const { return v_[i]; } size_type size () const { return v_.size(); } void reserve (size_type n) { v_.reserve(); } void resize (size_type n, value_type val = value_type()){ v_.resize();} void push_back(const value_type& val) { v_.push_back(); } private: Vector v_; }; /* class VectorDerived */ } /* namespace gu */ #endif /* _GU_VECTOR_HPP_ */ galera-4-26.4.25/galerautils/src/gu_stats.cpp000644 000164 177776 00000003211 15107057155 022151 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #include #include #include #include "gu_macros.h" #include "gu_stats.hpp" // http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance // http://www.johndcook.com/standard_deviation.html void gu::Stats::insert(const double val) { n_++; if (gu_unlikely(n_ == 1)) { old_m_ = new_m_ = val; old_s_ = new_s_ = 0.0; min_ = val; max_ = val; } else { new_m_ = old_m_ + (val - old_m_) / n_; new_s_ = old_s_ + (val - old_m_) * (val - new_m_); old_m_ = new_m_; old_s_ = new_s_; min_ = std::min(min_, val); max_ = std::max(max_, val); } } // I guess it's okay to assign 0.0 if no data. double gu::Stats::min() const { return gu_likely(n_ > 0) ? min_ : 0.0; } double gu::Stats::max() const { return gu_likely(n_ > 0) ? max_ : 0.0; } double gu::Stats::mean() const { return gu_likely(n_ > 0) ? new_m_ : 0.0; } double gu::Stats::variance() const { // n_ > 1 ? new_s_ / (n_ - 1) : 0.0; // is to compute unbiased sample variance // not population variance. return gu_likely(n_ > 0) ? 
new_s_ / n_ : 0.0; } double gu::Stats::std_dev() const { return sqrt(variance()); } std::string gu::Stats::to_string() const { std::ostringstream os; os << *this; return os.str(); } std::ostream& gu::operator<<(std::ostream& os, const gu::Stats& stats) { return (os << stats.min() << "/" << stats.mean() << "/" << stats.max() << "/" << stats.std_dev() << "/" << stats.times()); } galera-4-26.4.25/galerautils/src/gu_shared_ptr.hpp000644 000164 177776 00000003175 15107057155 023164 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2015-2017 Codership Oy // // // Define gu::shared_ptr and gu::enable_shared_from_this. // // Because of the lack of alias template in C++ a workaround of defining // the type inside the struct shared_ptr is used. // // For example, defining shared pointer type for type T is done like: // // typedef gu::shared_ptr::type TPtr; // // #ifndef GU_SHARED_PTR_HPP #define GU_SHARED_PTR_HPP #if defined(HAVE_STD_SHARED_PTR) # include # define GU_SHARED_PTR_NAMESPACE std #elif defined(HAVE_TR1_SHARED_PTR) # include # define GU_SHARED_PTR_NAMESPACE std::tr1 #elif defined(HAVE_BOOST_SHARED_PTR_HPP) # include # include # include # define GU_SHARED_PTR_NAMESPACE boost #else #error No supported shared_ptr headers #endif namespace gu { template struct shared_ptr { typedef GU_SHARED_PTR_NAMESPACE::shared_ptr type; }; template struct enable_shared_from_this { typedef GU_SHARED_PTR_NAMESPACE::enable_shared_from_this type; }; #if __cplusplus >= 201103L /* variadic templates */ template typename shared_ptr::type make_shared(Args&&... 
args) { return GU_SHARED_PTR_NAMESPACE::make_shared(args...); } #else /* add more templates if needed */ template typename shared_ptr::type make_shared() { return GU_SHARED_PTR_NAMESPACE::make_shared(); } #endif } #undef GU_SHARED_PTR_NAMESPACE #endif // GU_SHARED_PTR_HPP galera-4-26.4.25/galerautils/src/gu_mmh3.h000644 000164 177776 00000004070 15107057155 021330 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2020 Codership Oy /** * @file MurmurHash3 header * * This code is based on the reference C++ MurMurHash3 implementation by its * author Austin Appleby, who released it to public domain. * * $Id$ */ #ifndef _gu_mmh3_h_ #define _gu_mmh3_h_ #include // uint*_t #include // size_t #ifdef __cplusplus extern "C" { #endif /*! A function to hash buffer in one go */ extern uint32_t gu_mmh32(const void* buf, size_t len); /* * 128-bit MurmurHash3 */ /* returns hash in the canonical byte order, as a byte array */ extern void gu_mmh128 (const void* const msg, size_t const len, void* const out); /* returns hash as an integer, in host byte-order */ extern uint64_t gu_mmh128_64 (const void* const msg, size_t len); /* returns hash as an integer, in host byte-order */ extern uint32_t gu_mmh128_32 (const void* const msg, size_t len); /* * Functions to hash stream * (only 128-bit version, 32-bit is not relevant any more) */ typedef struct gu_mmh128_ctx { uint64_t hash[2]; uint64_t tail[2]; size_t length; } gu_mmh128_ctx_t; /*! Initialize MMH context with a default Galera seed. */ extern void gu_mmh128_init (gu_mmh128_ctx_t* mmh); /*! Apeend message part to hash context */ extern void gu_mmh128_append(gu_mmh128_ctx_t* mmh, const void* part, size_t len); /*! 
Get the accumulated message hash (does not change the context) */ extern void gu_mmh128_get (const gu_mmh128_ctx_t* mmh, void* const res); extern uint64_t gu_mmh128_get64(const gu_mmh128_ctx_t* mmh); extern uint32_t gu_mmh128_get32(const gu_mmh128_ctx_t* mmh); /* * Below are fuctions with reference signatures for implementation verification */ extern void gu_mmh3_32 (const void* key, int len, uint32_t seed, void* out); #if 0 /* x86 variant is faulty and unsuitable for short keys, ignore */ extern void gu_mmh3_x86_128 (const void* key, int len, uint32_t seed, void* out); #endif /* 0 */ extern void gu_mmh3_x64_128 (const void* key, int len, uint32_t seed, void* out); #ifdef __cplusplus } #endif #endif /* _gu_mmh3_h_ */ galera-4-26.4.25/galerautils/src/gu_logger.hpp000644 000164 177776 00000006613 15107057155 022310 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * This code is based on an excellent article at Dr.Dobb's: * http://www.ddj.com/cpp/201804215?pgno=1 * * It looks ugly because it has to integrate with C logger - * in order to produce identical output */ #ifndef __GU_LOGGER__ #define __GU_LOGGER__ #include extern "C" { #include "gu_log.h" #include "gu_conf.h" } namespace gu { // some portability stuff #ifdef _gu_log_h_ enum LogLevel { LOG_FATAL = GU_LOG_FATAL, LOG_ERROR = GU_LOG_ERROR, LOG_WARN = GU_LOG_WARN, LOG_INFO = GU_LOG_INFO, LOG_DEBUG = GU_LOG_DEBUG, LOG_MAX }; typedef gu_log_cb_t LogCallback; #else enum LogLevel { LOG_FATAL, LOG_ERROR, LOG_WARN, LOG_INFO, LOG_DEBUG, LOG_MAX }; typedef void (*LogCallback) (int, const char*); #endif class Logger { private: Logger(const Logger&); Logger& operator =(const Logger&); void prepare_default (); const LogLevel level; #ifndef _gu_log_h_ static LogLevel max_level; static bool do_timestamp; static LogCallback logger; static void default_logger (int, const char*); #else #define max_level gu_log_max_level #define logger gu_log_cb #define default_logger gu_log_cb_default #endif protected: 
std::ostringstream os; public: Logger(LogLevel _level = LOG_INFO) : level (_level), os () {} virtual ~Logger() { logger (level, os.str().c_str()); } std::ostringstream& get(const char* file, const char* func, int line) { if (default_logger == logger) { prepare_default(); // prefix with timestamp and log level } /* provide file:func():line info only when debug logging is on */ if (static_cast(LOG_DEBUG) == static_cast(max_level)) { os << file << ':' << func << "():" << line << ": "; } return os; } static bool no_log (LogLevel lvl) { return (static_cast(lvl) > static_cast(max_level)); } static void set_debug_filter(const std::string&); static bool no_debug(const std::string&, const std::string&, const int); #ifndef _gu_log_h_ static void enable_tstamp (bool); static void enable_debug (bool); static void set_logger (LogCallback); #endif }; #define GU_LOG_CPP(level) \ if (gu::Logger::no_log(level)) {} \ else gu::Logger(level).get(__FILE__, __FUNCTION__, __LINE__) // USAGE: LOG(level) << item_1 << item_2 << ... << item_n; #define log_fatal GU_LOG_CPP(gu::LOG_FATAL) #define log_error GU_LOG_CPP(gu::LOG_ERROR) #define log_warn GU_LOG_CPP(gu::LOG_WARN) #define log_info GU_LOG_CPP(gu::LOG_INFO) #define log_debug \ if (gu::Logger::no_debug(__FILE__, __FUNCTION__, __LINE__)) {} else \ GU_LOG_CPP(gu::LOG_DEBUG) } #endif // __GU_LOGGER__ galera-4-26.4.25/galerautils/src/gu_asio_utils.hpp000644 000164 177776 00000001733 15107057155 023202 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** * Common implementation utilities for gu Asio */ #ifndef GU_ASIO_UTILS_HPP #define GU_ASIO_UTILS_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL #include "asio/ip/address.hpp" #include // Workaround for clang 3.4 which pretends to be an old gcc compiler // which in turn turns off some features in boost headers. Also // GCC 4.4 and early Boost versions seem to be affected. 
#if (defined(__clang__) && __clang_major__ == 3 && __clang_minor__ <= 4) || \ (__GNUC__ == 4 && __GNUC_MINOR__ == 4) || BOOST_VERSION < 105300 namespace gu { template inline T* get_pointer(std::shared_ptr const& r) { return r.get(); } } #endif // (defined(__clang__) && __clang_major__ == 3 && // __clang_minor__ <= 4) || // (__GNUC__ == 4 && __GNUC_MINOR__ == 4) || BOOST_VERSION < 105300 #endif // GU_ASIO_UTILS_HPP galera-4-26.4.25/galerautils/src/gu_asio_stream_react.hpp000644 000164 177776 00000026262 15107057155 024517 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020-2024 Codership Oy // /** @file gu_asio_stream_react.hpp * * Asio stream socket implementations based on reactive model. * The AsioStreamReact controls TCP socket and reacts on * event notifications from IO service. The event handling is * then delegated to AsioStreamEngine object, whose * responsibility is to handle the events from socket and * forward them to AsioSocketHandler when appropriate. */ #ifndef GU_ASIO_STREAM_REACT_HPP #define GU_ASIO_STREAM_REACT_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. 
#endif // GU_ASIO_IMPL #include "gu_asio.hpp" #include "gu_asio_stream_engine.hpp" #include "gu_buffer.hpp" #include "asio/ip/tcp.hpp" #include #include "gu_disable_non_virtual_dtor.hpp" #include "gu_compiler.hpp" namespace gu { class AsioStreamReact : public AsioSocket , public std::enable_shared_from_this { public: AsioStreamReact(AsioIoService&, const std::string&, const std::shared_ptr&); AsioStreamReact(const AsioStreamReact&) = delete; AsioStreamReact& operator=(const AsioStreamReact&) = delete; ~AsioStreamReact(); virtual void open(const gu::URI&) GALERA_OVERRIDE; virtual bool is_open() const GALERA_OVERRIDE; virtual void shutdown() GALERA_OVERRIDE; virtual void close() GALERA_OVERRIDE; virtual void bind(const gu::AsioIpAddress&) GALERA_OVERRIDE; virtual void async_connect( const gu::URI&, const std::shared_ptr&) GALERA_OVERRIDE; virtual void async_write(const std::array&, const std::shared_ptr&) GALERA_OVERRIDE; virtual void async_read(const AsioMutableBuffer&, const std::shared_ptr&) GALERA_OVERRIDE; virtual void connect(const gu::URI&) GALERA_OVERRIDE; virtual size_t write(const AsioConstBuffer&) GALERA_OVERRIDE; virtual size_t read(const AsioMutableBuffer&) GALERA_OVERRIDE; virtual std::string local_addr() const GALERA_OVERRIDE; virtual std::string remote_addr() const GALERA_OVERRIDE; virtual void set_receive_buffer_size(size_t) GALERA_OVERRIDE; virtual size_t get_receive_buffer_size() GALERA_OVERRIDE; virtual void set_send_buffer_size(size_t) GALERA_OVERRIDE; virtual size_t get_send_buffer_size() GALERA_OVERRIDE; virtual struct tcp_info get_tcp_info() GALERA_OVERRIDE; // Handlers for ASIO service. 
void complete_client_handshake( const std::shared_ptr&, AsioStreamEngine::op_status); void complete_server_handshake( const std::shared_ptr&, AsioStreamEngine::op_status); void connect_handler(const std::shared_ptr&, const asio::error_code& ec); void client_handshake_handler(const std::shared_ptr&, const asio::error_code&); void server_handshake_handler( const std::shared_ptr&, const asio::error_code& ec); void read_handler(const std::shared_ptr&, const asio::error_code&); void write_handler(const std::shared_ptr&, const asio::error_code&); private: friend class AsioAcceptorReact; void assign_addresses(); void prepare_engine(bool non_blocking); // Start async read if not in progress. May be called several times // without handling read in between. template void start_async_read(Fn fn, FnArgs... args); // Start async write if not in progress. May be called several times // without handling a write in between. template void start_async_write(Fn, FnArgs...); void complete_read_op(const std::shared_ptr&, size_t bytes_transferred); void complete_write_op(const std::shared_ptr&, size_t bytes_transferred); void handle_read_handler_error( const std::shared_ptr&, const AsioErrorCode&); void handle_write_handler_error( const std::shared_ptr&, const AsioErrorCode&); void handle_isolation_error( const std::shared_ptr&); void set_non_blocking(bool); std::string debug_print() const; // Data members AsioIoService& io_service_; asio::ip::tcp::socket socket_; std::string scheme_; std::shared_ptr engine_; std::string local_addr_; std::string remote_addr_; bool connected_; bool handshake_complete_; bool non_blocking_; // Flags and state for operations in progress. 
static const int read_in_progress = 0x1; static const int write_in_progress = 0x2; static const int shutdown_in_progress = 0x4; // static const int client_handshake_in_progress = 0x4; // static const int server_handshake_in_progress = 0x8; static const int engine_wants_read = 0x10; static const int engine_wants_write = 0x20; int in_progress_; class ReadContext { public: ReadContext() : buf_() , bytes_transferred_() , read_completion_() { } ReadContext(const AsioMutableBuffer& buf) : buf_(buf) , bytes_transferred_() , read_completion_() { } ReadContext(const ReadContext&) = default; ReadContext& operator=(const ReadContext&) = default; const AsioMutableBuffer& buf() const { return buf_ ;} size_t bytes_transferred() const { return bytes_transferred_; } void read_completion(size_t read_completion) { assert(read_completion <= left_to_read()); read_completion_ = read_completion; } size_t read_completion() const { return read_completion_; } void inc_bytes_transferred(size_t val) { bytes_transferred_ += val; } // Bytes left to read on async read operation. This is either // remaining space left in the input buffer, or number of // bytes requested to read as indicated by read completion. size_t left_to_read() const { return (read_completion_ ? 
read_completion_ : buf_.size() - bytes_transferred_); } void reset() { buf_ = AsioMutableBuffer(); bytes_transferred_ = 0; read_completion_ = 0; } private: AsioMutableBuffer buf_; size_t bytes_transferred_; size_t read_completion_; } read_context_; class WriteContext { public: WriteContext() : buf_(), bytes_transferred_() { } WriteContext(const std::array& bufs) : buf_() , bytes_transferred_() { for (auto i(bufs.begin()); i != bufs.end(); ++i) { buf_.insert(buf_.end(), reinterpret_cast(i->data()), reinterpret_cast(i->data()) + i->size()); } } WriteContext(const WriteContext&) = default; WriteContext& operator=(const WriteContext&) = default; const gu::Buffer& buf() const { return buf_; } size_t bytes_transferred() const { return bytes_transferred_; } void inc_bytes_transferred(size_t val) { bytes_transferred_ += val; } void reset() { buf_.clear(); bytes_transferred_ = 0; } private: // A temporary buffer to copy data in before writing it to // socket. Trying to do scatter/gather IO with stream processing // engine which might involve TLS/SSL does not make any sense // as scatter/gather is not implemented, at least for OpenSSL. // Therefore we write the user provided buffer into single // continuous memory block which is passed to write function. // Galera typically sends relatively small messages in // async mode so the memory overhead should be minimal. // CPU overhead pales in comparison to encryption overhead, // so we can ignore it here. // // Another option would be writing buffers one by one, but given // the nature of gcomm messages (short header, payload), this // would increase the number of system calls significantly, which // quite likely would lead to higher overhead than buffer copy. 
gu::Buffer buf_; size_t bytes_transferred_; } write_context_; }; class AsioAcceptorReact : public AsioAcceptor , public std::enable_shared_from_this { public: AsioAcceptorReact(AsioIoService&, const std::string& scheme); virtual void open(const gu::URI&) GALERA_OVERRIDE; virtual bool is_open() const GALERA_OVERRIDE; virtual void listen(const gu::URI&) GALERA_OVERRIDE; virtual void close() GALERA_OVERRIDE; virtual void async_accept( const std::shared_ptr&, const std::shared_ptr&, const std::shared_ptr& engine = nullptr) GALERA_OVERRIDE; virtual std::shared_ptr accept() GALERA_OVERRIDE; virtual std::string listen_addr() const GALERA_OVERRIDE; virtual unsigned short listen_port() const GALERA_OVERRIDE; virtual void set_receive_buffer_size(size_t) GALERA_OVERRIDE; virtual size_t get_receive_buffer_size() GALERA_OVERRIDE; virtual void set_send_buffer_size(size_t) GALERA_OVERRIDE; virtual size_t get_send_buffer_size() GALERA_OVERRIDE; // ASIO handlers void accept_handler(const std::shared_ptr&, const std::shared_ptr&, const std::shared_ptr&, const asio::error_code&); private: std::string debug_print() const; AsioIoService& io_service_; asio::ip::tcp::acceptor acceptor_; std::string scheme_; bool listening_; std::shared_ptr engine_; }; } // namespace gu #include "gu_enable_non_virtual_dtor.hpp" #endif // GU_ASIO_STREAM_REACT_HPP galera-4-26.4.25/galerautils/src/gu_atomic.h000644 000164 177776 00000006601 15107057155 021742 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2014 Codership Oy /** * @file Atomic memory access functions. At the moment these follow * __atomic_XXX convention from GCC. */ #ifndef GU_ATOMIC_H #define GU_ATOMIC_H #ifdef __cplusplus extern "C" { #endif // So far in tests full memory sync shows the most consistent performance - // and it's the safest. @todo: reassess this later. 
#define GU_ATOMIC_SYNC_DEFAULT GU_ATOMIC_SYNC_FULL #ifdef __GNUC__ #if defined(__ATOMIC_RELAXED) // use __atomic_XXX builtins #define GU_ATOMIC_SYNC_NONE __ATOMIC_RELAXED #define GU_ATOMIC_SYNC_DEPEND __ATOMIC_ACQ_REL #define GU_ATOMIC_SYNC_FULL __ATOMIC_SEQ_CST #define gu_atomic_fetch_and_add(ptr, val) \ __atomic_fetch_add(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_sub(ptr, val) \ __atomic_fetch_sub(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_or(ptr, val) \ __atomic_fetch_or(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_and(ptr, val) \ __atomic_fetch_and(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_xor(ptr, val) \ __atomic_fetch_xor(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_fetch_and_nand(ptr, val) \ __atomic_fetch_nand(ptr, val,GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_add_and_fetch(ptr, val) \ __atomic_add_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_sub_and_fetch(ptr, val) \ __atomic_sub_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_or_and_fetch(ptr, val) \ __atomic_or_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_and_and_fetch(ptr, val) \ __atomic_and_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_xor_and_fetch(ptr, val) \ __atomic_xor_fetch(ptr, val, GU_ATOMIC_SYNC_DEFAULT) #define gu_atomic_nand_and_fetch(ptr, val) \ __atomic_nand_fetch(ptr, val,GU_ATOMIC_SYNC_DEFAULT) // stores contents of vptr into ptr #define gu_atomic_set(ptr, vptr) \ __atomic_store(ptr, vptr, GU_ATOMIC_SYNC_DEFAULT) // loads contents of ptr to vptr #define gu_atomic_get(ptr, vptr) \ __atomic_load(ptr, vptr, GU_ATOMIC_SYNC_DEFAULT) #elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) // use __sync_XXX builtins #define GU_ATOMIC_SYNC_NONE 0 #define GU_ATOMIC_SYNC_DEPEND 0 #define GU_ATOMIC_SYNC_FULL 0 #define gu_atomic_fetch_and_add __sync_fetch_and_add #define gu_atomic_fetch_and_sub __sync_fetch_and_sub #define gu_atomic_fetch_and_or __sync_fetch_and_or #define 
gu_atomic_fetch_and_and __sync_fetch_and_and #define gu_atomic_fetch_and_xor __sync_fetch_and_xor #define gu_atomic_fetch_and_nand __sync_fetch_and_nand #define gu_atomic_add_and_fetch __sync_add_and_fetch #define gu_atomic_sub_and_fetch __sync_sub_and_fetch #define gu_atomic_or_and_fetch __sync_or_and_fetch #define gu_atomic_and_and_fetch __sync_and_and_fetch #define gu_atomic_xor_and_fetch __sync_xor_and_fetch #define gu_atomic_nand_and_fetch __sync_nand_and_fetch #define gu_atomic_set(ptr, vptr) \ while (!__sync_bool_compare_and_swap(ptr, *ptr, *vptr)); #define gu_atomic_get(ptr, vptr) *vptr = __sync_fetch_and_or(ptr, 0) #else #error "This GCC version does not support 8-byte atomics on this platform. Use GCC >= 4.7.x." #endif /* __ATOMIC_RELAXED */ #else /* __GNUC__ */ #error "Compiler not supported" #endif #ifdef __cplusplus } #endif #endif /* !GU_ATOMIC_H */ galera-4-26.4.25/galerautils/src/gu_time.h000644 000164 177776 00000005705 15107057155 021430 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2008 Codership Oy /** * @file time manipulation functions/macros * * $Id$ */ #ifndef _gu_time_h_ #define _gu_time_h_ #include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** Returns seconds */ static inline double gu_timeval_diff (struct timeval* left, struct timeval* right) { long long diff = left->tv_sec; diff = ((diff - right->tv_sec)*1000000LL) + left->tv_usec - right->tv_usec; return (((double)diff) * 1.0e-06); } static inline void gu_timeval_add (struct timeval* t, double s) { double ret = (double)t->tv_sec + ((double)t->tv_usec) * 1.0e-06 + s; t->tv_sec = (long)ret; t->tv_usec = (long)((ret - (double)t->tv_sec) * 1.0e+06); } static const double SEC_PER_CLOCK = ((double)1.0)/CLOCKS_PER_SEC; /** Returns seconds */ static inline double gu_clock_diff (clock_t left, clock_t right) { return ((double)(left - right)) * SEC_PER_CLOCK; } #include /** * New time interface * * All functions return nanoseconds. 
*/ /* Maximum date representable by long long and compatible with timespec */ #define GU_TIME_ETERNITY 9223372035999999999LL #if defined(__APPLE__) /* synced with linux/time.h */ # define CLOCK_REALTIME 0 # define CLOCK_MONOTONIC 1 typedef int clockid_t; int clock_gettime (clockid_t clk_id, struct timespec * tp); #endif /* __APPLE__ */ static inline long long gu_time_getres() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_getres (CLOCK_REALTIME, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return 1000LL; // assumed resolution of gettimeofday() in nanoseconds #endif } static inline long long gu_time_calendar() { #if _POSIX_TIMERS > 0 || defined(__APPLE__) struct timespec tmp; clock_gettime (CLOCK_REALTIME, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else struct timeval tmp; gettimeofday (&tmp, NULL); return ((tmp.tv_sec * 1000000000LL) + (tmp.tv_usec * 1000LL)); #endif } static inline long long gu_time_monotonic() { #if defined(_POSIX_MONOTONIC_CLOCK) || defined(__APPLE__) struct timespec tmp; clock_gettime (CLOCK_MONOTONIC, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else struct timeval tmp; gettimeofday (&tmp, NULL); return ((tmp.tv_sec * 1000000000LL) + (tmp.tv_usec * 1000LL)); #endif } #ifdef CLOCK_PROCESS_CPUTIME_ID static inline long long gu_time_process_cputime() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_gettime (CLOCK_PROCESS_CPUTIME_ID, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return -1; #endif } #endif /* CLOCK_PROCESS_CPUTIME_ID */ static inline long long gu_time_thread_cputime() { #if _POSIX_TIMERS > 0 struct timespec tmp; clock_gettime (CLOCK_THREAD_CPUTIME_ID, &tmp); return ((tmp.tv_sec * 1000000000LL) + tmp.tv_nsec); #else return -1; #endif } #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_time_h_ */ galera-4-26.4.25/galerautils/src/gu_serializable.hpp000644 000164 177776 00000006031 15107057155 023471 0ustar00jenkinsnogroup000000 000000 // Copyright 
(C) 2013 Codership Oy /** * @file Declaration of serializeble interface that all serializable classes * should inherit. * * $Id$ */ #ifndef GU_SERIALIZABLE_HPP #define GU_SERIALIZABLE_HPP #include "gu_types.hpp" #include "gu_throw.hpp" #include "gu_assert.hpp" #include #include // for std::length_error namespace gu { class Serializable { public: /*! returns the size of a buffer required to serialize the object */ ssize_t serial_size () const { return my_serial_size(); } /*! * serializes this object into buf and returns serialized size * * @param buf pointer to buffer * @param size size of buffer * @return serialized size * * may throw exceptions */ ssize_t serialize_to (void* const buf, ssize_t const size) const { return my_serialize_to (buf, size); } /*! * serializes this object into byte vector v, reallocating it if needed * returns the size of serialized object */ ssize_t serialize_to (std::vector& v) const { size_t const old_size (v.size()); size_t const new_size (serial_size() + old_size); try { v.resize (new_size, 0); } catch (std::length_error& l) { gu_throw_error(EMSGSIZE) << "length_error: " << l.what(); } catch (...) { gu_throw_error(ENOMEM) << "could not resize to " << new_size << " bytes"; } try { return serialize_to (&v[old_size], new_size - old_size); } catch (...) 
{ v.resize (old_size); throw; } } protected: ~Serializable() {} private: virtual ssize_t my_serial_size () const = 0; virtual ssize_t my_serialize_to (void* buf, ssize_t size) const = 0; }; static inline std::vector& operator << (std::vector& out, const Serializable& s) { s.serialize_to (out); return out; } #if 0 // seems to be a pointless idea class DeSerializable { public: /* serial size of an object stored at ptr, may be not implemented */ template static ssize_t serial_size (const byte_t* const buf, ssize_t const size) { assert (size > 0); return DS::my_serial_size (buf, size); } /* serial size of an object stored at ptr, may be not implemented */ ssize_t deserialize_from (const byte_t* const buf, ssize_t const size) { assert (size > 0); return my_deserialize_from (buf, size); } ssize_t deserialize_from (const std::vector& in,size_t const offset) { return deserialize_from (&in[offset], in.size() - offset); } protected: ~DeSerializable() {} private: /* serial size of an object stored at ptr, may be not implemented */ virtual ssize_t my_deserialize_from (const byte_t* buf, ssize_t size) = 0; }; #endif // 0 } /* namespace gu */ #endif /* GU_SERIALIZABLE_HPP */ galera-4-26.4.25/galerautils/src/gu_backtrace.c000644 000164 177776 00000001151 15107057155 022373 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy #include "gu_backtrace.h" #include "gu_log.h" #if defined(HAVE_EXECINFO_H) && defined(__GNUC__) #include #include char** gu_backtrace(int* size) { char** strings; void** array = malloc(*size * sizeof(void*)); if (!array) { gu_error("could not allocate memory for %d pointers\n", *size); return NULL; } *size = backtrace(array, *size); strings = backtrace_symbols(array, *size); free(array); return strings; } #else char **gu_backtrace(int* size) { return NULL; } #endif /* */ galera-4-26.4.25/galerautils/src/gu_config.hpp000644 000164 177776 00000024506 15107057155 022277 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2014 Codership Oy 
/** * @file * Configuration management class * * $Id$ */ #ifndef _gu_config_hpp_ #define _gu_config_hpp_ #include "gu_string_utils.hpp" #include "gu_exception.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include #include #include namespace gu { class Config; } extern "C" const char* gu_str2ll (const char* str, long long* ll); class gu::Config { public: static const char PARAM_SEP; // parameter separator static const char KEY_VALUE_SEP; // key-value separator static const char ESCAPE; // escape symbol Config (); bool has (const std::string& key) const { return (params_.find(key) != params_.end()); } bool is_set (const std::string& key) const { param_map_t::const_iterator const i(params_.find(key)); if (i != params_.end()) { return i->second.is_set(); } else { throw NotFound(); } } /* adds parameter to the known parameter list */ void add (const std::string& key) { gu_trace(key_check(key)); if (!has(key)) { params_[key] = Parameter(); } } void add (const std::string& key, int flags) { gu_trace(key_check(key)); if (!has(key)) { params_[key] = Parameter(flags); } } /* adds parameter to the known parameter list and sets its value */ void add (const std::string& key, const std::string& value) { gu_trace(key_check(key)); if (!has(key)) { params_[key] = Parameter(value); } } void add (const std::string& key, const std::string& value, int flags) { gu_trace(key_check(key)); if (!has(key)) { params_[key] = Parameter(value, flags); } } /* sets a known parameter to some value, otherwise throws NotFound */ void set (const std::string& key, const std::string& value) { param_map_t::iterator const i(params_.find(key)); if (i != params_.end()) { if (deprecation_check_func_) { deprecation_check_func_(i->first, i->second); } i->second.set(value); } else { #ifndef NDEBUG log_debug << "Key '" << key << "' not recognized."; #endif throw NotFound(); } } void set (const std::string& key, const char* value) { set(key, std::string(value)); } /* Sets flags of 
the parameter with given key. * Throws NotFound, if the key is not present. */ void set_flags (const std::string& key, int flags) { param_map_t::iterator const i(params_.find(key)); if (i != params_.end()) { i->second.set_flags(flags); } else { throw NotFound(); } } /* Parse a string of semicolon separated key=value pairs into a vector. * Throws Exception in case of parsing error. */ static void parse (std::vector >& params_vector, const std::string& params_string); /* Parse a string of semicolumn separated key=value pairs and * set the values. * Throws NotFound if key was not explicitly added before. */ void parse (const std::string& params_string); /* General template for integer types */ template void set (const std::string& key, T val) { set_longlong (key, val); } /*! @throws NotSet, NotFound */ const std::string& get (const std::string& key) const { param_map_t::const_iterator const i(params_.find(key)); if (i == params_.end()) { log_debug << "key '" << key << "' not found."; throw NotFound(); } if (i->second.is_set()) return i->second.value(); log_debug << "key '" << key << "' not set."; throw NotSet(); } const std::string& get (const std::string& key, const std::string& def) const { try { return get(key); } catch (NotSet&) { return def ; } } /*! @throws NotFound */ template inline T get (const std::string& key) const { return from_config (get(key)); } template inline T get(const std::string& key, const T& def) const { try { return get(key); } catch (NotSet&) { return def; } } void print (std::ostream& os, bool include_not_set = false) const; /*! Convert string configuration values to other types. * General template for integers, specialized templates follow below. 
* @throw gu::Exception in case conversion failed */ template static inline T from_config (const std::string& value) { const char* str = value.c_str(); long long ret; errno = 0; // this is needed to detect overflow const char* endptr = gu_str2ll (str, &ret); check_conversion (str, endptr, "integer", ERANGE == errno); switch (sizeof(T)) { case 1: return overflow_char (ret); case 2: return overflow_short (ret); case 4: return overflow_int (ret); default: return ret; } } /* iterator stuff */ struct Flag { static const int hidden = (1 << 0); static const int deprecated = (1 << 1); static const int read_only = (1 << 2); static const int type_bool = (1 << 3); static const int type_integer = (1 << 4); static const int type_double = (1 << 5); static const int type_duration = (1 << 6); static const int type_mask = Flag::type_bool | Flag::type_integer | Flag::type_double | Flag::type_duration; static std::string to_string(int f) { std::ostringstream s; if (f & Flag::hidden) s << "hidden | "; if (f & Flag::deprecated) s << "deprecated | "; if (f & Flag::read_only) s << "read_only | "; if (f & Flag::type_bool) s << "bool | "; if (f & Flag::type_integer) s << "integer | "; if (f & Flag::type_double) s << "double | "; if (f & Flag::type_duration) s << "duration | "; std::string ret(s.str()); if (ret.length() > 3) ret.erase(ret.length() - 3); return ret; } }; class Parameter { public: explicit Parameter() : value_(), set_(false), flags_(0) {} Parameter(const std::string& value) : value_(value), set_(true), flags_(0) {} Parameter(int flags) : value_(), set_(false), flags_(flags) {} Parameter(const std::string& value, int flags) : value_(value), set_(true), flags_(flags) {} const std::string& value() const { return value_; } bool is_set() const { return set_ ; } int flags() const { return flags_; } bool is_hidden() const { return flags_ & Flag::hidden; } bool is_deprecated() const { return flags_ & Flag::deprecated; } void set(const std::string& value) { value_ = value; set_ = true; 
} void set_flags(int flags) { flags_ = flags; } private: std::string value_; bool set_; int flags_; }; typedef std::map param_map_t; typedef param_map_t::const_iterator const_iterator; const_iterator begin() const { return params_.begin(); } const_iterator end() const { return params_.end(); } static void enable_deprecation_check(); static void disable_deprecation_check(); private: static void key_check (const std::string& key); static void check_conversion (const char* ptr, const char* endptr, const char* type, bool range_error = false); static void check_deprecated(const std::string& str, const Parameter& param); static char overflow_char(long long ret); static short overflow_short(long long ret); static int overflow_int(long long ret); void set_longlong (const std::string& key, long long value); param_map_t params_; static std::function deprecation_check_func_; }; extern "C" const char* gu_str2dbl (const char* str, double* dbl); extern "C" const char* gu_str2bool (const char* str, bool* bl); extern "C" const char* gu_str2ptr (const char* str, void** ptr); namespace gu { std::ostream& operator<<(std::ostream&, const gu::Config&); /*! 
Specialized templates for "funny" types */ template <> inline double Config::from_config (const std::string& value) { const char* str = value.c_str(); double ret; errno = 0; // this is needed to detect over/underflow const char* endptr = gu_str2dbl (str, &ret); check_conversion (str, endptr, "double", ERANGE == errno); return ret; } template <> inline bool Config::from_config (const std::string& value) { const char* str = value.c_str(); bool ret; const char* endptr = gu_str2bool (str, &ret); check_conversion (str, endptr, "boolean"); return ret; } template <> inline void* Config::from_config (const std::string& value) { const char* str = value.c_str(); void* ret; const char* endptr = gu_str2ptr (str, &ret); check_conversion (str, endptr, "pointer"); return ret; } template <> inline void Config::set (const std::string& key, const void* value) { set (key, to_string(value)); } template <> inline void Config::set (const std::string& key, double val) { set (key, to_string(val)); } template <> inline void Config::set (const std::string& key, bool val) { const char* val_str(val ? 
"YES" : "NO"); // YES/NO is most generic set (key, val_str); } } #endif /* _gu_config_hpp_ */ galera-4-26.4.25/galerautils/src/gu_limits.c000644 000164 177776 00000007407 15107057155 021767 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2016 Codership Oy /** * @file system limit macros * * $Id:$ */ #include "gu_limits.h" #include "gu_log.h" #include #include #include #if defined(__APPLE__) #include // doesn't seem to be used directly, but jst in case #include static long darwin_phys_pages (void) { /* Note: singleton pattern would be useful here */ vm_statistics64_data_t vm_stat; unsigned int count = HOST_VM_INFO64_COUNT; kern_return_t ret = host_statistics64 (mach_host_self (), HOST_VM_INFO64, (host_info64_t) &vm_stat, &count); if (ret != KERN_SUCCESS) { gu_error ("host_statistics64 failed with code %d", ret); return 0; } /* This gives a value a little less than physical memory of computer */ return vm_stat.free_count + vm_stat.active_count + vm_stat.inactive_count + vm_stat.wire_count; /* Exact value may be obtain via sysctl ({CTL_HW, HW_MEMSIZE}) */ /* Note: sysctl is 60% slower compared to host_statistics64 */ } static long darwin_avphys_pages (void) { vm_statistics64_data_t vm_stat; unsigned int count = HOST_VM_INFO64_COUNT; kern_return_t ret = host_statistics64 (mach_host_self (), HOST_VM_INFO64, (host_info64_t) &vm_stat, &count); if (ret != KERN_SUCCESS) { gu_error ("host_statistics64 failed with code %d", ret); return 0; } /* Note: * vm_stat.free_count == vm_page_free_count + vm_page_speculative_count */ return vm_stat.free_count - vm_stat.speculative_count; } static inline size_t page_size() { return getpagesize(); } static inline size_t phys_pages() { return darwin_phys_pages(); } static inline size_t avphys_pages() { return darwin_avphys_pages(); } #elif defined(__FreeBSD__) #include // VM_TOTAL #include // struct vmtotal #include static long freebsd_avphys_pages (void) { /* TODO: 1) sysctlnametomib may be called once */ /* 2) 
vm.stats.vm.v_cache_count is potentially free memory too */ int mib_vm_stats_vm_v_free_count[4]; size_t mib_sz = 4; int rc = sysctlnametomib ("vm.stats.vm.v_free_count", mib_vm_stats_vm_v_free_count, &mib_sz); if (rc != 0) { gu_error ("sysctlnametomib(vm.stats.vm.v_free_count) failed, code %d", rc); return 0; } unsigned int vm_stats_vm_v_free_count; size_t sz = sizeof (vm_stats_vm_v_free_count); rc = sysctl (mib_vm_stats_vm_v_free_count, mib_sz, &vm_stats_vm_v_free_count, &sz, NULL, 0); if (rc != 0) { gu_error ("sysctl(vm.stats.vm.v_free_count) failed with code %d", rc); return 0; } return vm_stats_vm_v_free_count; } static inline size_t page_size() { return sysconf(_SC_PAGESIZE); } static inline size_t phys_pages() { return sysconf(_SC_PHYS_PAGES); } static inline size_t avphys_pages() { return freebsd_avphys_pages(); } #else /* !__APPLE__ && !__FreeBSD__ */ static inline size_t page_size() { return sysconf(_SC_PAGESIZE); } static inline size_t phys_pages() { return sysconf(_SC_PHYS_PAGES); } static inline size_t avphys_pages() { return sysconf(_SC_AVPHYS_PAGES); } #endif /* !__APPLE__ && !__FreeBSD__ */ #define GU_DEFINE_FUNCTION(func) \ size_t gu_##func() \ { \ static size_t ret = 0; \ if (0 == ret) ret = func(); \ return ret; \ } GU_DEFINE_FUNCTION(page_size) GU_DEFINE_FUNCTION(phys_pages) GU_DEFINE_FUNCTION(avphys_pages) galera-4-26.4.25/galerautils/src/gu_string.hpp000644 000164 177776 00000017215 15107057155 022337 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /*! * @file string class template that allows to allows to allocate initial storage * to hold string data together with the object. If storage is exhausted, * it transparently overflows to heap. 
*/ #ifndef _GU_STRING_HPP_ #define _GU_STRING_HPP_ #include "gu_vector.hpp" #include #include // std::bad_alloc #include #include // realloc() #include // strlen(), strcmp() #include // snprintf() #include #include "gu_macros.h" // gu_likely() namespace gu { /* container for a printf()-like format */ struct Fmt { explicit Fmt(const char* f) : fmt_(f) {} const char* const fmt_; }; template class StringBase { public: typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef size_t size_type; size_type size() const { return size_; } size_type length()const { return size(); } pointer c_str() { return str_; } const_pointer c_str() const { return str_; } StringBase& operator<< (const Fmt& f) { fmt_ = f.fmt_; return *this; } StringBase& operator<< (const StringBase& s) { size_type const n(s.size()); append_string (s.c_str(), n); return *this; } StringBase& operator<< (const char* s) { size_type const n(::strlen(s)); append_string (s, n); return *this; } StringBase& operator<< (const std::string& s) { append_string (s.c_str(), s.length()); return *this; } StringBase& operator<< (const bool& b) { // following std::boolalpha if (b) append_string ("true", 4); else append_string ("false", 5); return *this; } StringBase& operator<< (const double& d) { convert ("%f", std::numeric_limits::digits10, d); return *this; } StringBase& operator<< (const void* const ptr) { /* not using %p here seeing that it may be not universally supported */ static size_type const ptr_len(sizeof(ptr) == 4 ? 11 : 19 ); static const char* const fmt(sizeof(ptr) == 4 ? 
"0x%08lx":"0x%016lx"); convert (fmt, ptr_len, reinterpret_cast(ptr)); return *this; } StringBase& operator<< (const long long &i) { convert ("%lld", 21, i); return *this; } StringBase& operator<< (const unsigned long long &i) { convert ("%llu", 20, i); return *this; } StringBase& operator<< (const int &i) { convert ("%d", 11, i); return *this; } StringBase& operator<< (const unsigned int &i) { convert ("%u", 10, i); return *this; } StringBase& operator<< (const short &i) { convert ("%hd", 6, i); return *this; } StringBase& operator<< (const unsigned short &i) { convert ("%hu", 5, i); return *this; } StringBase& operator<< (const char &c) { convert ("%c", 1, c); return *this; } StringBase& operator<< (const unsigned char &c) { convert ("%hhu", 3, c); return *this; } template StringBase& operator+= (const X& x) { return operator<<(x); } bool operator== (const StringBase& other) { return (size() == other.size() && !::strcmp(c_str(), other.c_str())); } bool operator== (const std::string& other) { return (size() == other.size() && !::strcmp(c_str(), other.c_str())); } bool operator== (const char* s) { size_type const s_size(::strlen(s)); return (size() == s_size && !::strcmp(c_str(), s)); } template bool operator!= (const X& x) { return !operator==(x); } void clear() { derived_clear(); }; StringBase& operator= (const StringBase& other) { clear(); append_string (other.c_str(), other.size()); return *this; } StringBase& operator= (const char* const other) { clear(); append_string (other, ::strlen(other)); return *this; } protected: pointer str_; // points to an adequately sized memory area const char* fmt_; size_type size_; virtual void reserve (size_type n) = 0; virtual void derived_clear() = 0; // real clear must happen in derived class void append_string (const_pointer const s, size_type const n) { reserve(size_ + n + 1); std::copy(s, s + n, &str_[size_]); size_ += n; str_[size_] = 0; } template void convert (const char* const format, size_type max_len, const X& x) { 
++max_len; // add null termination reserve(size_ + max_len); int const n(snprintf(&str_[size_], max_len, fmt_ ? fmt_ : format, x)); assert(n > 0); assert(size_type(n) < max_len); if (gu_likely(n > 0)) size_ += n; str_[size_] = 0; // null-terminate even if snprintf() failed. fmt_ = NULL; } StringBase(pointer init_buf) : str_(init_buf), fmt_(NULL), size_(0) {} virtual ~StringBase() {} private: StringBase(const StringBase&); }; /* class StringBase */ template std::ostream& operator<< (std::ostream& os, const gu::StringBase& s) { os << s.c_str(); return os; } template class String : public StringBase { public: typedef T value_type; typedef T* pointer; typedef const T* const_pointer; typedef size_t size_type; String() : StringBase(buf_), reserved_(capacity), buf_() { buf_[0] = 0; } explicit String(const StringBase& s) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s.c_str(), s.size()); } String(const T* s, size_type n) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s, n); } explicit String(const char* s) : StringBase(buf_), reserved_(capacity), buf_() { size_type const n(strlen(s)); append_string (s, n); } explicit String(const std::string& s) : StringBase(buf_), reserved_(capacity), buf_() { append_string (s.c_str(), s.length()); } #if 0 String& operator= (String other) { using namespace std; swap(other); return *this; } #endif template String& operator= (const X& x) { base::operator=(x); return *this; } ~String() { if (base::str_ != buf_) ::free(base::str_); } private: size_type reserved_; value_type buf_[capacity]; typedef StringBase base; void reserve (size_type const n) { if (n <= reserved_) return; assert (n > capacity); bool const overflow(buf_ == base::str_); pointer const tmp (static_cast (::realloc(overflow ? 
NULL : base::str_, n * sizeof(value_type)))); if (NULL == tmp) throw std::bad_alloc(); if (overflow) std::copy(buf_, buf_ + base::size_, tmp); base::str_ = tmp; reserved_ = n; } void derived_clear() { if (base::str_ != buf_) ::free(base::str_); base::str_ = buf_; base::size_ = 0; buf_[0] = 0; reserved_ = capacity; } void append_string (const_pointer s, size_type n) { base::append_string(s, n); } }; /* class String */ } /* namespace gu */ #endif /* _GU_STRING_HPP_ */ galera-4-26.4.25/galerautils/src/gu_buf.h000644 000164 177776 00000000515 15107057155 021240 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy */ /** * @file generic buffer declaration * * $Id$ */ #ifndef _gu_buf_h_ #define _gu_buf_h_ #include "gu_types.h" #ifdef __cplusplus extern "C" { #endif struct gu_buf { const void* ptr; ssize_t size; }; #ifdef __cplusplus } #endif #endif /* _gu_buf_h_ */ galera-4-26.4.25/galerautils/src/gu_threads.h000644 000164 177776 00000015457 15107057155 022131 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2017 Codership Oy /** * @file Abstracts naitive multithreading API behind POSIX threads-like API */ #ifndef _gu_mutex_h_ #define _gu_mutex_h_ #include "gu_types.h" // bool #if __unix__ #include typedef pthread_t gu_thread_t_SYS; #define gu_thread_create_SYS pthread_create #define gu_thread_join_SYS pthread_join #define gu_thread_cancel_SYS pthread_cancel #define gu_thread_exit_SYS pthread_exit #define gu_thread_detach_SYS pthread_detach #define gu_thread_self_SYS pthread_self #define gu_thread_equal_SYS pthread_equal #define GU_THREAD_INITIALIZER_SYS 0 typedef pthread_mutexattr_t gu_mutexattr_t_SYS; typedef pthread_mutex_t gu_mutex_t_SYS; #define gu_mutex_init_SYS pthread_mutex_init #define gu_mutex_lock_SYS pthread_mutex_lock #define gu_mutex_unlock_SYS pthread_mutex_unlock #define gu_mutex_destroy_SYS pthread_mutex_destroy #define GU_MUTEX_INITIALIZER_SYS PTHREAD_MUTEX_INITIALIZER typedef pthread_condattr_t gu_condattr_t_SYS; typedef 
pthread_cond_t gu_cond_t_SYS; #define gu_cond_init_SYS pthread_cond_init #define gu_cond_destroy_SYS pthread_cond_destroy #define gu_cond_wait_SYS pthread_cond_wait #define gu_cond_timedwait_SYS pthread_cond_timedwait #define gu_cond_signal_SYS pthread_cond_signal #define gu_cond_broadcast_SYS pthread_cond_broadcast #define GU_COND_INITIALIZER_SYS PTHREAD_COND_INITIALIZER #if defined(__APPLE__) /* emulate barriers missing on MacOS */ #ifdef __cplusplus extern "C" { #endif typedef int gu_barrierattr_t_SYS; typedef struct { gu_mutex_t_SYS mutex; gu_cond_t_SYS cond; int count; int tripCount; } gu_barrier_t_SYS; int gu_barrier_init_SYS (gu_barrier_t_SYS *barrier, const gu_barrierattr_t_SYS *attr,unsigned int count); int gu_barrier_destroy_SYS(gu_barrier_t_SYS *barrier); int gu_barrier_wait_SYS (gu_barrier_t_SYS *barrier); #define GU_BARRIER_SERIAL_THREAD_SYS -1 #ifdef __cplusplus } #endif #else /* native POSIX barriers */ typedef pthread_barrierattr_t gu_barrierattr_t_SYS; typedef pthread_barrier_t gu_barrier_t_SYS; #define gu_barrier_init_SYS pthread_barrier_init #define gu_barrier_destroy_SYS pthread_barrier_destroy #define gu_barrier_wait_SYS pthread_barrier_wait #define GU_BARRIER_SERIAL_THREAD_SYS PTHREAD_BARRIER_SERIAL_THREAD #endif /* native POSIX barriers */ #endif /* __unix__ */ /** * Depending on compile-time flags application will either use * normal or debug version of the API calls */ #ifndef GU_DEBUG_MUTEX /* GU_DEBUG_MUTEX not defined - use operating system definitions */ typedef gu_mutex_t_SYS gu_mutex_t; #define gu_mutex_init gu_mutex_init_SYS #define gu_mutex_lock gu_mutex_lock_SYS #define gu_mutex_unlock gu_mutex_unlock_SYS #define gu_mutex_destroy gu_mutex_destroy_SYS #define gu_cond_wait gu_cond_wait_SYS #define gu_cond_timedwait gu_cond_timedwait_SYS #define GU_MUTEX_INITIALIZER GU_MUTEX_INITIALIZER_SYS #else /* GU_DEBUG_MUTEX defined - use custom debug versions of some calls */ typedef struct { gu_mutex_t_SYS mutex; gu_cond_t_SYS cond; 
gu_thread_t_SYS thread; /* point in source code, where called from */ const char *file; int line; int waiter_count; //!< # of threads waiting for lock int cond_waiter_count; //!< # of threads waiting for some cond bool locked; //!< must be 0 or 1 } gu_mutex_t_DBG; #define GU_MUTEX_INITIALIZER { \ GU_MUTEX_INITIALIZER_SYS, \ GU_COND_INITIALIZER_SYS, \ GU_THREAD_INITIALIZER_SYS, \ __FILE__, \ __LINE__, \ 0, 0, false } #ifdef __cplusplus extern "C" { #endif /** @name Debug versions of basic mutex calls */ /*@{*/ extern int gu_mutex_init_DBG (gu_mutex_t_DBG *mutex, const gu_mutexattr_t_SYS *attr, const char *file, unsigned int line); extern int gu_mutex_lock_DBG (gu_mutex_t_DBG *mutex, const char *file, unsigned int line); extern int gu_mutex_unlock_DBG (gu_mutex_t_DBG *mutex, const char *file, unsigned int line); extern int gu_mutex_destroy_DBG (gu_mutex_t_DBG *mutex, const char *file, unsigned int line); extern int gu_cond_twait_DBG (gu_cond_t_SYS *cond, gu_mutex_t_DBG *mutex, const struct timespec *abstime, const char *file, unsigned int line); #ifdef __cplusplus } // extern "C" #endif static inline int gu_cond_wait_DBG (gu_cond_t_SYS *cond, gu_mutex_t_DBG *mutex, const char *file, unsigned int line) { return gu_cond_twait_DBG(cond, mutex, NULL, file, line); } static inline bool gu_mutex_locked (const gu_mutex_t_DBG* m) { return m->locked; } static inline bool gu_mutex_owned (const gu_mutex_t_DBG* m) { return m->locked && gu_thread_equal_SYS(gu_thread_self_SYS(), m->thread); } static inline void gu_mutex_disown (gu_mutex_t_DBG* m) { memset(&m->thread, 0, sizeof(m->thread)); } /*@}*/ typedef gu_mutex_t_DBG gu_mutex_t; #define gu_mutex_init(M,A) gu_mutex_init_DBG (M,A, __FILE__, __LINE__) #define gu_mutex_lock(M) gu_mutex_lock_DBG (M, __FILE__, __LINE__) #define gu_mutex_unlock(M) gu_mutex_unlock_DBG (M, __FILE__, __LINE__) #define gu_mutex_destroy(M) gu_mutex_destroy_DBG (M, __FILE__, __LINE__) #define gu_cond_wait(S,M) gu_cond_wait_DBG (S,M, __FILE__, __LINE__) 
#define gu_cond_timedwait(S,M,T) gu_cond_twait_DBG (S,M,T, __FILE__, __LINE__) #endif /* DEBUG_MUTEX */ /* declarations without debug variants */ typedef gu_mutexattr_t_SYS gu_mutexattr_t; typedef gu_thread_t_SYS gu_thread_t; #define gu_thread_create gu_thread_create_SYS #define gu_thread_join gu_thread_join_SYS #define gu_thread_cancel gu_thread_cancel_SYS #define gu_thread_exit gu_thread_exit_SYS #define gu_thread_detach gu_thread_detach_SYS #define gu_thread_self gu_thread_self_SYS #define gu_thread_equal gu_thread_equal_SYS typedef gu_condattr_t_SYS gu_condattr_t; typedef gu_cond_t_SYS gu_cond_t; #define gu_cond_init gu_cond_init_SYS #define gu_cond_destroy gu_cond_destroy_SYS #define gu_cond_signal gu_cond_signal_SYS #define gu_cond_broadcast gu_cond_broadcast_SYS #define GU_COND_INITIALIZER GU_COND_INITIALIZER_SYS typedef gu_barrierattr_t_SYS gu_barrierattr_t; typedef gu_barrier_t_SYS gu_barrier_t; #define gu_barrier_init gu_barrier_init_SYS #define gu_barrier_destroy gu_barrier_destroy_SYS #define gu_barrier_wait gu_barrier_wait_SYS #define GU_BARRIER_SERIAL_THREAD GU_BARRIER_SERIAL_THREAD_SYS #endif /* _gu_mutex_h_ */ galera-4-26.4.25/galerautils/src/gu_crc32c.c000644 000164 177776 00000012113 15107057155 021533 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013-2020 Codership Oy * * $Id$ */ /** * @file Portable software-only implementation of CRC32C algorithm */ #include "gu_crc32c.h" #include "gu_log.h" #include "gu_arch.h" // GU_ASSERT_ALIGNMENT() #include "gu_byteswap.h" // gu_le32() static uint32_t crc32c_lut[8][256]; /* CRC32C lookup tables */ static void crc32c_compute_lut() { static uint32_t const P = 0x82f63b78; /* CRC32C polynomial */ /* Generate LUT 0 */ for (int i = 0; i < 256; i++) { uint32_t val = i; for (int j = 0; j < 8; j++) val = (val >> 1) ^ ((val & 1) * P); crc32c_lut[0][i] = val; } /* Generate LUTs 1 2 3 4 5 6 7 */ for (int j = 0; j < 7; j++) { for (int i = 0; i < 256; i++) { uint32_t const val = crc32c_lut[j][i]; 
crc32c_lut[j+1][i] = (val >> 8) ^ crc32c_lut[0][val & 0xFF]; } } } #define GU_CRC32C_1BYTE_BLOCK(state, ptr) \ state = (state >> 8) ^ crc32c_lut[0][(state ^ *ptr) & 0xFF]; /** Original one-byte-at-a-time lookup algorithm */ gu_crc32c_t gu_crc32c_sarwate(gu_crc32c_t state, const void* data, size_t len) { const uint8_t* ptr = (const uint8_t*)data; const uint8_t* const end = ptr + len; while (ptr < end) { GU_CRC32C_1BYTE_BLOCK(state, ptr); ptr++; } return state; } /** Unrolled processing of data shorter than 4 bytes */ static inline gu_crc32c_t crc32c_3bytes(gu_crc32c_t state, const uint8_t* ptr, size_t len) { assert(len < 4); switch (len) { case 3: GU_CRC32C_1BYTE_BLOCK(state, ptr); ptr++; /* fall through */ case 2: GU_CRC32C_1BYTE_BLOCK(state, ptr); ptr++; /* fall through */ case 1: GU_CRC32C_1BYTE_BLOCK(state, ptr); /* fall through */ } return state; } /** Process initial misaligned bytes (there can be at most 3) and adjust * ptr pointer and remaining length accordingly. */ static inline gu_crc32c_t crc32c_lead_in(gu_crc32c_t state, const uint8_t** ptr, size_t* len) { assert(*len >= 4); size_t const lead_in = (4 - (intptr_t)(*ptr)) & 0x3; assert((uintptr_t)(*ptr) & 0x3 || lead_in == 0); state = crc32c_3bytes(state, *ptr, lead_in); *len -= lead_in; *ptr += lead_in; return state; } #define GU_CRC32C_4BYTE_BLOCK(state, base) \ state = \ crc32c_lut[base + 3][(state ) & 0xFF] ^ \ crc32c_lut[base + 2][(state >> 8) & 0xFF] ^ \ crc32c_lut[base + 1][(state >> 16) & 0xFF] ^ \ crc32c_lut[base ][(state >> 24) ]; gu_crc32c_t gu_crc32c_slicing_by_4(gu_crc32c_t state, const void* data, size_t len) { const uint8_t* ptr = (const uint8_t*)data; if (len >= 4) { /* handle lead-in misaligned bytes */ state = crc32c_lead_in(state, &ptr, &len); while (len >= 4) { const uint32_t* slice = (const uint32_t*)ptr; GU_ASSERT_ALIGNMENT(*slice); state ^= gu_le32(*slice); GU_CRC32C_4BYTE_BLOCK(state, 0); len -= 4; ptr += 4; } } assert(len < 4); /* handle trailing misalignment */ return 
crc32c_3bytes(state, ptr, len); } gu_crc32c_t gu_crc32c_slicing_by_8(gu_crc32c_t state, const void* data, size_t len) { const uint8_t* ptr = (const uint8_t*)data; if (len >= 4) { /* handle lead-in misaligned bytes */ state = crc32c_lead_in(state, &ptr, &len); while (len >= 8) { const uint32_t* slices = (const uint32_t*)ptr; GU_ASSERT_ALIGNMENT(*slices); gu_crc32c_t state0 = gu_le32(slices[0]) ^ state; GU_CRC32C_4BYTE_BLOCK(state0, 4); gu_crc32c_t state1 = gu_le32(slices[1]); GU_CRC32C_4BYTE_BLOCK(state1, 0); state = state0 ^ state1; len -= 8; ptr += 8; } if (len >= 4) { const uint32_t* slice = (const uint32_t*)ptr; GU_ASSERT_ALIGNMENT(*slice); state ^= gu_le32(*slice); GU_CRC32C_4BYTE_BLOCK(state, 0); len -= 4; ptr += 4; } } assert(len < 4); /* handle trailing misalignment */ return crc32c_3bytes(state, ptr, len); } static gu_crc32c_func_t crc32c_best_algorithm() { gu_crc32c_func_t ret = NULL; #if !defined(GU_CRC32C_NO_HARDWARE) ret = gu_crc32c_hardware(); #endif if (!ret) { #if defined(__arm__) && GU_WORDSIZE == 32 /* On 32-bit ARM slicing-by-4 seems to outperform slicing-by-8 * by 1.1-1.2x */ gu_info ("CRC-32C: using \"slicing-by-4\" algorithm."); ret = gu_crc32c_slicing_by_4; #else /* On x86 slicing-by-8 seems to outperform slicing-by-4 by 1.2-1.7x */ gu_info ("CRC-32C: using \"slicing-by-8\" algorithm."); ret = gu_crc32c_slicing_by_8; #endif /* __arm__ */ } return ret; } gu_crc32c_func_t gu_crc32c_func = NULL; void gu_crc32c_configure() { crc32c_compute_lut(); gu_crc32c_func = crc32c_best_algorithm(); } galera-4-26.4.25/galerautils/src/SConscript000644 000164 177776 00000006305 15107057155 021635 0ustar00jenkinsnogroup000000 000000 Import('env', 'x86', 'arm64', 'crc32c_no_hardware', 'crc32c_cppflags', 'crc32c_cflags') libgalerautils_env = env.Clone() # Include paths libgalerautils_env.Append(CPPPATH = Split(''' # #common ''')) # C part libgalerautils_sources = [ 'gu_abort.c', 'gu_dbug.c', 'gu_fifo.c', 'gu_lock_step.c', 'gu_log.c', 'gu_mem.c', 'gu_mmh3.c', 
'gu_spooky.c', 'gu_rand.c', 'gu_threads.c', 'gu_hexdump.c', 'gu_to.c', 'gu_utils.c', 'gu_uuid.c', 'gu_backtrace.c', 'gu_limits.c', 'gu_time.c', 'gu_init.c' ] libgalerautils_objs = libgalerautils_env.SharedObject(libgalerautils_sources) crc32c_env = libgalerautils_env.Clone() crc32c_env.Append(CPPFLAGS = crc32c_cppflags) crc32c_sources = [ 'gu_crc32c.c' ] if not crc32c_no_hardware: # environment with hardware-specific flags crc32c_hw_env = crc32c_env.Clone() crc32c_hw_env.Append(CFLAGS = crc32c_cflags) if x86: libgalerautils_objs += crc32c_hw_env.SharedObject([ 'gu_crc32c_x86.c' ]) elif arm64: libgalerautils_objs += crc32c_hw_env.SharedObject([ 'gu_crc32c_arm64.c' ]) libgalerautils_objs += crc32c_env.SharedObject(crc32c_sources) libgalerautils_env.StaticLibrary('galerautils', libgalerautils_objs) env.Append(LIBGALERA_OBJS = libgalerautils_objs) libgalerautilsxx_env = env.Clone() # Include paths libgalerautilsxx_env.Append(CPPPATH = Split(''' # #/common ''')) # disable old style cast warnings libgalerautilsxx_env.Append(CXXFLAGS = ' -Wno-old-style-cast') # C++ part libgalerautilsxx_sources = [ 'gu_vlq.cpp', 'gu_datetime.cpp', 'gu_event_service.cpp', 'gu_exception.cpp', 'gu_serialize.cpp', 'gu_logger.cpp', 'gu_regex.cpp', 'gu_string_utils.cpp', 'gu_uri.cpp', 'gu_buffer.cpp', 'gu_utils++.cpp', 'gu_gtid.cpp', 'gu_config.cpp', 'gu_fdesc.cpp', 'gu_mmap.cpp', 'gu_alloc.cpp', 'gu_rset.cpp', 'gu_resolver.cpp', 'gu_histogram.cpp', 'gu_signals.cpp', 'gu_stats.cpp', 'gu_asio.cpp', 'gu_asio_datagram.cpp', 'gu_asio_stream_react.cpp', 'gu_asio_stream_engine.cpp', 'gu_debug_sync.cpp', 'gu_thread.cpp' ] #libgalerautilsxx_objs = libgalerautilsxx_env.Object( # libgalerautilsxx_sources) libgalerautilsxx_sobjs = libgalerautilsxx_env.SharedObject( libgalerautilsxx_sources) hexdump_sobj = libgalerautilsxx_env.SharedObject('gu_hexdump++','gu_hexdump.cpp') uuid_sobj = libgalerautilsxx_env.SharedObject('gu_uuid++','gu_uuid.cpp') libgalerautilsxx_sobjs += hexdump_sobj libgalerautilsxx_sobjs 
+= uuid_sobj #if '-DGALERA_USE_GU_NETWORK' in libgalerautils_env['CPPFLAGS']: # libgalerautilsxx_sources = [libgalerautilsxx_sources, # 'gu_resolver.cpp'] libgalerautilsxx_env.StaticLibrary('galerautils++', libgalerautilsxx_sobjs) env.Append(LIBGALERA_OBJS = libgalerautilsxx_sobjs) galera-4-26.4.25/galerautils/src/gu_uri.hpp000644 000164 177776 00000015255 15107057155 021632 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy * * $Id$ */ /*! * @file gu_url.hpp * * @brief Utility to parse URIs * * Follows http://tools.ietf.org/html/rfc3986 * * @author Teemu Ollakka */ #ifndef __GU_URI_HPP__ #define __GU_URI_HPP__ #include #include #include #include "gu_utils.hpp" #include "gu_regex.hpp" namespace gu { /*! * @brief URIQueryList * * std::multimap is used to implement query list in URI. * @todo This should be changed to real class having get_key(), * get_value() methods for iterators and to get rid of std::multimap * dependency in header. */ typedef std::multimap URIQueryList; /*! * @brief Utility class to parse URIs */ class URI { public: /*! * @class Helper class for authority list representation. */ class Authority { public: /*! * @brief Get "user" part of authority * * @return user substring * @throws NotSet */ const std::string& user() const { return user_.str(); } /*! * @brief Get "host" part of authority * * @return host substring * @throws NotSet */ const std::string& host() const { return host_.str(); } /*! * @brief Get "port" part of authority * * @return port * @throws NotSet */ const std::string& port() const { return port_.str(); } private: friend class gu::URI; Authority() : user_(), host_(), port_() { } RegEx::Match user_; RegEx::Match host_; RegEx::Match port_; }; typedef std::vector AuthorityList; /*! 
* @brief Construct URI from string * * @param strict if true, throw exception when scheme is not found, * else use a default one * @throws std::invalid_argument if URI is not valid * @throws std::logic_error in case of internal error * @throws NotSet */ URI (const std::string&, bool strict = true); /*! * @brief Get URI string * @return URI string */ const std::string& to_string() const { if (modified_) recompose(); return str_; } /*! * @brief Get URI scheme * * @return URI scheme (always set) * @throws NotSet */ const std::string& get_scheme() const { return scheme_.str(); } /*! * @brief Get URI authority component * * @return URI authority substring * @throws NotSet */ std::string get_authority() const; /*! * @brief Get "user" part of the first entry in authority list * * @return User substring * @throws NotSet */ const std::string& get_user() const { if (authority_.empty()) throw NotSet(); return authority_.front().user(); } /*! * @brief Get "host" part of the first entry in authority list * * @return Host substring * @throws NotSet */ const std::string& get_host() const { if (authority_.empty()) throw NotSet(); return authority_.front().host(); } /*! * @brief Get "port" part of the first entry in authority list * * @return Port substring * @throws NotSet */ const std::string& get_port() const { if (authority_.empty()) throw NotSet(); return authority_.front().port(); } /*! * @brief Get authority list * * @return Authority list */ const AuthorityList& get_authority_list() const { return authority_; } /*! * @brief Get URI path * * @return URI path (always set) */ const std::string& get_path() const { return path_.str(); } /*! * @brief Get URI path * * @return URI path * @throws NotSet */ const std::string& get_fragment() const { return fragment_.str(); } /*! 
* @brief Add query param to URI */ void set_query_param(const std::string&, const std::string&, bool override); void set_option(const std::string& key, const std::string& val) { set_query_param(key, val, true); } void append_option(const std::string& key, const std::string& val) { set_query_param(key, val, false); } /*! * @brief Get URI query list */ const URIQueryList& get_query_list() const { return query_list_; } /*! * @brief return opton by name, * @throws NotFound */ const std::string& get_option(const std::string&) const; const std::string& get_option(const std::string& opt, const std::string& def) const { try { return get_option(opt); } catch (NotFound& ) { return def ; } } private: bool modified_; mutable std::string str_; /*! URI string */ RegEx::Match scheme_; /*! URI scheme part */ AuthorityList authority_; RegEx::Match path_; /*! URI path part */ RegEx::Match fragment_; /*! URI fragment part */ URIQueryList query_list_; /*! URI query list */ /*! * @brief Parse URI from str */ void parse (const std::string& s, bool strict); /*! * @brief Recompose URI in str */ void recompose() const; /*! @throws NotSet */ std::string get_authority(const Authority&) const; static const char* const uri_regex_; /*! regexp string to parse URI */ static RegEx const regex_; /*! URI regexp parser */ }; inline std::ostream& operator<<(std::ostream& os, const URI& uri) { os << uri.to_string(); return os; } } #endif /* __GU_URI_HPP__ */ galera-4-26.4.25/galerautils/src/gu_asio_io_service_impl.hpp000644 000164 177776 00000001702 15107057155 025206 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @file gu_asio_io_service.hpp * * Asio IO service implementation. */ #ifndef GU_ASIO_IO_SERVICE_HPP #define GU_ASIO_IO_SERVICE_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. 
#endif // GU_ASIO_IMPL #include "gu_asio.hpp" #include "asio/io_service.hpp" #ifdef GALERA_HAVE_SSL #include "asio/ssl.hpp" #endif // GALERA_HAVE_SSL namespace gu { // // IO Service implementation, wraps asio types. // class AsioIoService::Impl { public: Impl() : io_service_() #ifdef GALERA_HAVE_SSL , ssl_context_() #endif // GALERA_HAVE_SSL { } asio::io_service& native() { return io_service_; } private: asio::io_service io_service_; public: #ifdef GALERA_HAVE_SSL std::unique_ptr ssl_context_; #endif // GALERA_HAVE_SSL }; } #endif // GU_ASIO_IO_SERVICE_HPP galera-4-26.4.25/galerautils/src/gu_system.h000644 000164 177776 00000001512 15107057155 022006 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @system/os/platform dependent functions/macros * * $Id$ */ #ifndef _gu_system_h_ #define _gu_system_h_ #define _GNU_SOURCE // program_invocation_name, program_invocation_short_name #include #include // getexecname, getprogname #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /* See: http://lists.gnu.org/archive/html/bug-gnulib/2010-12/txtrjMzutB7Em.txt * for implementation of GU_SYS_PROGRAM_NAME on other platforms */ #if defined(__sun__) # define GU_SYS_PROGRAM_NAME getexecname () #elif defined(__APPLE__) || defined(__FreeBSD__) # define GU_SYS_PROGRAM_NAME getprogname () #elif defined(__linux__) # define GU_SYS_PROGRAM_NAME program_invocation_name #endif #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_system_h_ */ galera-4-26.4.25/galerautils/src/gu_macros.hpp000644 000164 177776 00000001407 15107057155 022311 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2017 Codership Oy /** * @file Miscellaneous C++-related macros * * $Id$ */ #ifndef _gu_macros_hpp_ #define _gu_macros_hpp_ #include "gu_macros.h" /* To protect against "old-style" casts in libc macros * must be included after respective libc headers */ #if defined(SIG_IGN) extern "C" { static void (* const GU_SIG_IGN)(int) = SIG_IGN; } #endif namespace gu { 
template struct CompileAssert {}; } /* namespace gu */ #define GU_COMPILE_ASSERT(expr,msg) \ typedef gu::CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] __attribute__((unused)) /* For C++11 compatibility */ #if __cplusplus >= 201103L # define GU_NOEXCEPT(x) noexcept(x) #else # define GU_NOEXCEPT(x) #endif #endif /* _gu_macros_hpp_ */ galera-4-26.4.25/galerautils/src/gu_byteswap.h000644 000164 177776 00000006244 15107057155 022327 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /** * @file Byte swapping functions/macros * * $Id$ */ #ifndef _gu_byteswap_h_ #define _gu_byteswap_h_ #include "gu_arch.h" #include "gu_types.h" #include "gu_macros.h" /* * Platform-dependent macros */ #if defined(_MSC_VER) #include #define GU_ROTL32(x,y) _rotl(x,y) #define GU_ROTL64(x,y) _rotl64(x,y) #else /* !defined(_MSC_VER) */ static GU_FORCE_INLINE uint32_t GU_ROTL32 (uint32_t x, int8_t r) { return (x << r) | (x >> (32 - r)); } static GU_FORCE_INLINE uint64_t GU_ROTL64 (uint64_t x, int8_t r) { return (x << r) | (x >> (64 - r)); } #endif /* !defined(_MSC_VER) */ /* * End of paltform-dependent macros */ #if defined(HAVE_BYTESWAP_H) # include // for bswap_16(x), bswap_32(x), bswap_64(x) #elif defined(__APPLE__) # include // for OSSwapInt16(x), etc. #endif /* HAVE_BYTESWAP_H */ #if defined(__APPLE__) /* do not use OSSwapIntXX, because gcc44 gives old-style cast warnings */ # define gu_bswap16 _OSSwapInt16 # define gu_bswap32 _OSSwapInt32 # define gu_bswap64 _OSSwapInt64 #elif defined(__sun__) # define gu_bswap16 BSWAP_16 # define gu_bswap32 BSWAP_32 # define gu_bswap64 BSWAP_64 #elif defined(bswap16) # define gu_bswap16 bswap16 # define gu_bswap32 bswap32 # define gu_bswap64 bswap64 #elif defined(bswap_16) # define gu_bswap16 bswap_16 # define gu_bswap32 bswap_32 # define gu_bswap64 bswap_64 #else # error "No byteswap macros are defined" #endif /* @note: there are inline functions behind these macros below, * so typesafety is taken care of... 
However C++ still has issues: */ #ifdef __cplusplus // To pacify C++. Not loosing much optimization on 2 bytes anyways. #include #undef gu_bswap16 static GU_FORCE_INLINE uint16_t gu_bswap16(uint16_t const x) // Even though x is declared as 'uint16_t', g++-4.4.1 still treats results // of operations with it as 'int' and freaks out on return with -Wconversion. { return static_cast((x >> 8) | (x << 8)); } #endif // __cplusplus #if defined(GU_LITTLE_ENDIAN) /* convert to/from Little Endian representation */ #define gu_le16(x) (x) #define gu_le32(x) (x) #define gu_le64(x) (x) /* convert to/from Big Endian representation */ #define gu_be16(x) gu_bswap16(x) #define gu_be32(x) gu_bswap32(x) #define gu_be64(x) gu_bswap64(x) #else /* Big-Endian */ /* convert to/from Little Endian representation */ #define gu_le16(x) gu_bswap16(x) #define gu_le32(x) gu_bswap32(x) #define gu_le64(x) gu_bswap64(x) /* convert to/from Big Endian representation */ #define gu_be16(x) (x) #define gu_be32(x) (x) #define gu_be64(x) (x) #endif /* Big-Endian */ /* Analogues to htonl and friends. Since we'll be dealing mostly with * little-endian architectures, there is more sense to use little-endian * as default */ #define htogs(x) gu_le16(x) #define gtohs(x) htogs(x) #define htogl(x) gu_le32(x) #define gtohl(x) htogl(x) /* Analogues to htogs() and friends, suffixed with type width */ #define htog16(x) gu_le16(x) #define gtoh16(x) htog16(x) #define htog32(x) gu_le32(x) #define gtoh32(x) htog32(x) #define htog64(x) gu_le64(x) #define gtoh64(x) htog64(x) #endif /* _gu_byteswap_h_ */ galera-4-26.4.25/galerautils/src/gu_thread.hpp000644 000164 177776 00000005053 15107057155 022275 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2016-2017 Codership Oy // // // Threading utilities // #ifndef GU_THREAD_HPP #define GU_THREAD_HPP #include "gu_threads.h" #include namespace gu { // // Wrapper class for thread scheduling parameters. 
For details, // about values see sched_setscheduler() and pthread_setschedparams() // documentation. // class ThreadSchedparam { public: // // Default constructor. Initializes to default system // scheduling parameters. // ThreadSchedparam() : policy_(SCHED_OTHER), prio_ (0) { } // // Construct ThreadSchedparam from given policy and priority // integer values. // ThreadSchedparam(int policy, int prio) : policy_(policy), prio_ (prio) { } // // Construct ThreadSchedparam from given string representation // which must have form of // // : // // wehre policy is one of "other", "fifo", "rr" and priority // is an integer. // ThreadSchedparam(const std::string& param); // Return scheduling policy int policy() const { return policy_; } // Return scheduling priority int prio() const { return prio_ ; } // Equal to operator overload bool operator==(const ThreadSchedparam& other) const { return (policy_ == other.policy_ && prio_ == other.prio_); } // Not equal to operator overload bool operator!=(const ThreadSchedparam& other) const { return !(*this == other); } // Default system ThreadSchedparam static ThreadSchedparam system_default; void print(std::ostream& os) const; private: int policy_; // Scheduling policy int prio_; // Scheduling priority }; // // Return current scheduling parameters for given thread // ThreadSchedparam thread_get_schedparam(gu_thread_t thread); // // Set scheduling parameters for given thread. // // Throws gu::Exception if setting parameters fails. 
// void thread_set_schedparam(gu_thread_t thread, const ThreadSchedparam&); // // Insertion operator for ThreadSchedparam // inline std::ostream& operator<<(std::ostream& os, const gu::ThreadSchedparam& sp) { sp.print(os); return os; } } #endif // GU_THREAD_HPP galera-4-26.4.25/galerautils/src/gu_byteswap.hpp000644 000164 177776 00000003305 15107057155 022662 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2017 Codership Oy /** * @file Endian conversion templates for serialization * * $Id$ */ #ifndef _gu_byteswap_hpp_ #define _gu_byteswap_hpp_ #include "gu_byteswap.h" #include "gu_macros.hpp" // GU_COMPILE_ASSERT #include namespace gu { /* General template utility class: undefined */ template class gtoh_template_helper { public: static T f(T val) { // to generate error on compilation stage rather then linking return val.this_template_use_is_not_supported(); } }; /* Utility argument size-specialized templates, don't use directly */ template class gtoh_template_helper { GU_COMPILE_ASSERT(1 == sizeof(T), gtoh_wrong_argument_size1); public: static GU_FORCE_INLINE T f(T val) { return val; } }; template class gtoh_template_helper { GU_COMPILE_ASSERT(2 == sizeof(T), gtoh_wrong_argument_size2); public: static GU_FORCE_INLINE T f(T val) { return gtoh16(val); } }; template class gtoh_template_helper { GU_COMPILE_ASSERT(4 == sizeof(T), gtoh_wrong_argument_size4); public: static GU_FORCE_INLINE T f(T val) { return gtoh32(val); } }; template class gtoh_template_helper { GU_COMPILE_ASSERT(8 == sizeof(T), gtoh_wrong_argument_size8); public: static GU_FORCE_INLINE T f(T val) { return gtoh64(val); } }; /* Proper generic byteswap templates for general use */ template GU_FORCE_INLINE T gtoh (const T& val) { return gtoh_template_helper::f(val); } template T htog (const T& val) { return gtoh(val); } } /* namespace gu */ #endif /* _gu_byteswap_hpp_ */ galera-4-26.4.25/galerautils/src/gu_conf.h000644 000164 177776 00000001125 15107057155 021407 0ustar00jenkinsnogroup000000 
000000 // Copyright (C) 2007 Codership Oy /** * @file * Configuration interface for libgalerautils * * $Id$ */ #ifndef _gu_conf_h_ #define _gu_conf_h_ #ifdef __cplusplus extern "C" { #endif /* Logging options */ #include #include "gu_log.h" extern int gu_conf_set_log_file (FILE* file); extern int gu_conf_set_log_callback (gu_log_cb_t callback); extern int gu_conf_self_tstamp_on (); extern int gu_conf_self_tstamp_off (); extern int gu_conf_debug_on (); extern int gu_conf_debug_off (); #ifdef __cplusplus } #endif #endif // _gu_conf_h_ galera-4-26.4.25/galerautils/src/gu_backtrace.hpp000644 000164 177776 00000002565 15107057155 022752 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy #ifndef GU_BACKTRACE_HPP #define GU_BACKTRACE_HPP #include "gu_backtrace.h" #include #include namespace gu { /*! * Utility class to print backtraces. */ class Backtrace { public: /*! * Construct backtrace object. * * @param Maximum number of backtrace symbols resolved (default 50). */ Backtrace(int size = 50) : symbols_size_(size), symbols_(gu_backtrace(&symbols_size_)) { } ~Backtrace() { free(symbols_); } /*! * Print backtrace into ostream. * * @param os Ostream to print backtrace into. * @param delim Delimiter separating backtrace symbols. */ void print(std::ostream& os, char delim = '\n') { if (symbols_ != 0) { for (int i(0); i < symbols_size_; ++i) { os << symbols_[i] << delim; } } else { os << "no backtrace available"; } } private: Backtrace(const Backtrace&); void operator=(const Backtrace&); int symbols_size_; char** symbols_; }; } #endif // GU_BACKTRACE_HPP galera-4-26.4.25/galerautils/src/gu_status.hpp000644 000164 177776 00000001650 15107057155 022350 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2014 Codership Oy //! // @file // Common class for gathering Galera wide status. The class is simple // string based key-value store. 
// #ifndef GU_STATUS_HPP #define GU_STATUS_HPP #include "gu_exception.hpp" #include #include namespace gu { class Status { public: typedef std::map VarMap; typedef VarMap::iterator iterator; typedef VarMap::const_iterator const_iterator; Status() : vars_() { } void insert(const std::string& key, const std::string& val) { vars_.insert(std::make_pair(key, val)); } const_iterator begin() { return vars_.begin(); } const_iterator end() { return vars_.end(); } size_t size() const { return vars_.size(); } private: VarMap vars_; }; } #endif // !GU_STATUS_HPP galera-4-26.4.25/galerautils/src/gu_gtid.cpp000644 000164 177776 00000001102 15107057155 021737 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015-2017 Codership Oy */ #include "gu_gtid.hpp" #include "gu_throw.hpp" #include void gu::GTID::print(std::ostream& os) const { os << uuid_ << ':' << seqno_; } void gu::GTID::scan(std::istream& is) { UUID u; char c; seqno_t s; try { is >> u >> c >> s; } catch (std::exception& e) { gu_throw_error(EINVAL) << e.what(); } if (c != ':') { gu_throw_error(EINVAL) << "Malformed GTID: '" << u << c << s << '\''; } uuid_ = u; seqno_ = s; } galera-4-26.4.25/galerautils/src/gu_lock_step.h000644 000164 177776 00000001651 15107057155 022451 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy * * $Id$ */ // This is a small class to facilitate lock-stepping in multithreaded unit tests #ifndef _gu_lock_step_h_ #define _gu_lock_step_h_ #include #include "gu_threads.h" typedef struct gu_lock_step { gu_mutex_t mtx; gu_cond_t cond; long wait; long cont; bool enabled; } gu_lock_step_t; extern void gu_lock_step_init (gu_lock_step_t* ls); /* enable or disable lock-stepping */ extern void gu_lock_step_enable (gu_lock_step_t* ls, bool enable); extern void gu_lock_step_wait (gu_lock_step_t* ls); /* returns how many waiters there were, * waits for timeout_ms milliseconds if no waiters, if timeout_ms < 0 waits forever, * if 0 - no wait at all */ extern long 
gu_lock_step_cont (gu_lock_step_t* ls, long timeout_ms); extern void gu_lock_step_destroy (gu_lock_step_t* ls); #endif /* _gu_lock_step_h_ */ galera-4-26.4.25/galerautils/src/gu_event_service.hpp000644 000164 177776 00000002266 15107057155 023672 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2021 Codership Oy // /** * Event service class */ #ifndef GALERA_EVENT_SERVICE_HPP #define GALERA_EVENT_SERVICE_HPP #include "wsrep_event_service.h" #include #include namespace gu { class EventService { public: static int init_v1(const wsrep_event_service_v1_t*); static void deinit_v1(); static void callback(const std::string& name, const std::string& value) { std::lock_guard lock(EventService::mutex); if (instance && instance->cb_) { instance->cb_(instance->ctx_, name.c_str(), value.c_str()); } } private: wsrep_event_context_t* const ctx_; wsrep_event_cb_t const cb_; static std::mutex mutex; static size_t usage; static EventService* instance; EventService(wsrep_event_context_t* ctx, wsrep_event_cb_t cb) : ctx_(ctx), cb_(cb) {} ~EventService() {} EventService(const EventService&); EventService& operator =(EventService); }; } /* galera */ #endif /* GALERA_EVENT_SERVICE_HPP */ galera-4-26.4.25/galerautils/src/gu_log.h000644 000164 177776 00000006170 15107057155 021250 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2014 Codership Oy /** * @file Logging API * * $Id$ */ #ifndef _gu_log_h_ #define _gu_log_h_ #include "gu_macros.h" #include /* For NULL */ #if defined(__cplusplus) extern "C" { #endif /** * @typedef * Defines severity classes for log messages: * FATAL - is a fatal malfunction of the library which cannot be recovered * from. Application must close. * error - error condition in the library which prevents further normal * operation but can be recovered from by the application. E.g. EAGAIN. * warn - some abnormal condition which library can recover from by itself. * * info - just an informative log message. * * debug - debugging message. 
*/ typedef enum gu_log_severity { GU_LOG_FATAL, GU_LOG_ERROR, GU_LOG_WARN, GU_LOG_INFO, GU_LOG_DEBUG } gu_log_severity_t; /** * @typedef * Defines a type of callback function that application can provide * to do the logging */ typedef void (*gu_log_cb_t) (int severity, const char* msg); /** Helper for macros defined below. Should not be called directly. */ extern int gu_log (gu_log_severity_t severity, const char* file, const char* function, const int line, const char* fmt, ...) __attribute__((format(printf, 5, 6))); /** This variable is made global only for the purpose of using it in * gu_debug() macro and avoid calling gu_log() when debug is off. * Don't use it directly! */ extern gu_log_severity_t gu_log_max_level; #define gu_log_debug (GU_LOG_DEBUG == gu_log_max_level) #if defined(__cplusplus) } #endif #if !defined(__cplusplus) || defined(GALERA_LOG_H_ENABLE_CXX) /** * @name Logging macros. * Must be implemented as macros to report the location of the code where * they are called. */ /*@{*/ #define gu_fatal(...) \ gu_log(GU_LOG_FATAL, __FILE__, __func__, __LINE__, __VA_ARGS__); #define gu_error(...) \ gu_log(GU_LOG_ERROR, __FILE__, __func__, __LINE__, __VA_ARGS__); #define gu_warn(...) \ gu_log(GU_LOG_WARN, __FILE__, __func__, __LINE__, __VA_ARGS__); #define gu_info(...) \ gu_log(GU_LOG_INFO, __FILE__, __func__, __LINE__, __VA_ARGS__) #define gu_debug(...) 
\ if (gu_unlikely(gu_log_debug)) \ { \ gu_log(GU_LOG_DEBUG, __FILE__, __func__, __LINE__, __VA_ARGS__); \ } /*@}*/ #endif /* __cplusplus */ #endif /* _gu_log_h_ */ #ifdef __GU_LOGGER__ // C++ logger should use the same stuff, so export it #ifndef _gu_log_extra_ #define _gu_log_extra_ extern "C" { extern bool gu_log_self_tstamp; extern gu_log_cb_t gu_log_cb; extern void gu_log_cb_default (int, const char*); extern const char* gu_log_level_str[]; } #endif /* _gu_log_extra_ */ #endif /* __GU_LOGGER__ */ galera-4-26.4.25/galerautils/src/gu_monitor.hpp000644 000164 177776 00000003322 15107057155 022512 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2017 Codership Oy * * $Id$ */ /*! * @file gu_monitor.hpp * * */ #ifndef __GU_MONITOR_HPP__ #define __GU_MONITOR_HPP__ #include #include namespace gu { class Monitor; class Critical; } class gu::Monitor { int mutable refcnt; Mutex mutex; Cond cond; #ifndef NDEBUG gu_thread_t mutable holder; #endif // copy contstructor and operator= disabled by mutex and cond members. 
// but on Darwin, we got an error 'class gu::Monitor' has pointer data members // so make non-copyable explicitly Monitor(const Monitor&); void operator=(const Monitor&); public: #ifndef NDEBUG Monitor() : refcnt(0), mutex(), cond(), holder(0) {} #else Monitor() : refcnt(0), mutex(), cond() {} #endif ~Monitor() {} void enter() const { Lock lock(mutex); // Teemu, pthread_equal() check seems redundant, refcnt too (counted in cond) // while (refcnt > 0 && pthread_equal(holder, pthread_self()) == 0) while (refcnt) { lock.wait(cond); } refcnt++; #ifndef NDEBUG holder = gu_thread_self(); #endif } void leave() const { Lock lock(mutex); assert(refcnt > 0); assert(gu_thread_equal(holder, gu_thread_self()) != 0); refcnt--; if (refcnt == 0) { cond.signal(); } } }; class gu::Critical { const Monitor& mon; Critical (const Critical&); Critical& operator= (const Critical&); public: Critical(const Monitor& m) : mon(m) { mon.enter(); } ~Critical() { mon.leave(); } }; #endif /* __GU_MONITOR_HPP__ */ galera-4-26.4.25/galerautils/src/gu_array.hpp000644 000164 177776 00000001645 15107057155 022147 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2017 Codership Oy // // // Define gu::array through either std::array or boost::array // // Because of the lack of alias template in C++ a workaround of defining // the type inside the struct array is used. 
// // For example, defining gu::array type for type T is done like: // // typedef gu::array::type A; // // #ifndef GU_ARRAY_HPP #define GU_ARRAY_HPP #if defined(HAVE_STD_ARRAY) # include # define GU_ARRAY_NAMESPACE std #elif defined(HAVE_TR1_ARRAY) # include # define GU_ARRAY_NAMESPACE std::tr1 #elif defined(HAVE_BOOST_ARRAY_HPP) # include # define GU_ARRAY_NAMESPACE boost #else #error No supported array headers #endif namespace gu { template struct array { typedef GU_ARRAY_NAMESPACE::array type; }; } #undef GU_ARRAY_NAMESPACE #endif // GU_SHARED_PTR_HPP galera-4-26.4.25/galerautils/src/gu_errno.h000644 000164 177776 00000001313 15107057155 021606 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef GU_ERRNO_H #define GU_ERRNO_H #include #if defined(__APPLE__) || defined(__FreeBSD__) # define GU_ELAST ELAST #else /* must be high enough to not collide with system errnos but lower than 256 */ # define GU_ELAST 200 #endif #ifndef EBADFD # define EBADFD (GU_ELAST+1) #endif #ifndef EREMCHG # define EREMCHG (GU_ELAST+2) #endif #ifndef ENOTUNIQ # define ENOTUNIQ (GU_ELAST+3) #endif #ifndef ERESTART # define ERESTART (GU_ELAST+4) #endif #ifndef ENOTRECOVERABLE # define ENOTRECOVERABLE (GU_ELAST+5) #endif #ifndef ENODATA # define ENODATA (GU_ELAST+6) #endif #endif /* GU_STR_H */ galera-4-26.4.25/galerautils/src/gu_hexdump.cpp000644 000164 177776 00000001521 15107057155 022467 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file operator << for hexdump - definiton * * $Id$ */ #include "gu_hexdump.hpp" #include "gu_hexdump.h" #include "gu_logger.hpp" namespace gu { static size_t const hexdump_bytes_per_go(GU_HEXDUMP_BYTES_PER_LINE * 2); static size_t const hexdump_reserve_string( hexdump_bytes_per_go*2 /* chars */ + hexdump_bytes_per_go/4 /* whitespace */ + 1 /* \0 */ ); std::ostream& Hexdump::to_stream (std::ostream& os) const { char str[hexdump_reserve_string]; size_t off(0); while (off < size_) { size_t const 
to_print(std::min(size_ - off, hexdump_bytes_per_go)); gu_hexdump (buf_ + off, to_print, str, sizeof(str), alpha_); off += to_print; os << str; if (off < size_) os << '\n'; } return os; } } // namespace gu galera-4-26.4.25/galerautils/src/gu_hexdump.c000644 000164 177776 00000003623 15107057155 022134 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2013 Codership Oy /** * @file Functions to dump buffer contents in a readable form * * $Id$ */ #include "gu_hexdump.h" #include "gu_macros.h" #define GU_ASCII_0 0x30 #define GU_ASCII_10 0x3a #define GU_ASCII_A 0x41 #define GU_ASCII_a 0x61 #define GU_ASCII_A_10 (GU_ASCII_A - GU_ASCII_10) #define GU_ASCII_a_10 (GU_ASCII_a - GU_ASCII_10) static GU_FORCE_INLINE int _hex_code (uint8_t const x) { return (x + GU_ASCII_0 + (x > 9)*GU_ASCII_a_10); } static GU_FORCE_INLINE void _write_byte_binary (char* const str, uint8_t const byte) { str[0] = _hex_code(byte >> 4); str[1] = _hex_code(byte & 0x0f); } static GU_FORCE_INLINE void _write_byte_alpha (char* const str, uint8_t const byte) { str[0] = (char)byte; str[1] = '.'; } #define GU_ASCII_ALPHA_START 0x20U /* ' ' */ #define GU_ASCII_ALPHA_END 0x7eU /* '~' */ #define GU_ASCII_ALPHA_INTERVAL (GU_ASCII_ALPHA_END - GU_ASCII_ALPHA_START) static GU_FORCE_INLINE bool _byte_is_alpha (uint8_t const byte) { return (byte - GU_ASCII_ALPHA_START <= GU_ASCII_ALPHA_INTERVAL); } /*! Dumps contents of the binary buffer into a readable form */ void gu_hexdump(const void* buf, ssize_t const buf_size, char* str, ssize_t str_size, bool alpha) { const uint8_t* b = (uint8_t*)buf; ssize_t i; str_size--; /* reserve a space for \0 */ for (i = 0; i < buf_size && str_size > 1;) { if (alpha && _byte_is_alpha (b[i])) _write_byte_alpha (str, b[i]); else _write_byte_binary (str, b[i]); str += 2; str_size -= 2; i++; if (0 == (i % 4) && str_size > 0 && i < buf_size) { /* insert space after every 4 bytes and newline after every 32 */ str[0] = (i % GU_HEXDUMP_BYTES_PER_LINE) ? 
' ' : '\n'; str_size--; str++; } } str[0] = '\0'; } galera-4-26.4.25/galerautils/src/gu_limits.h000644 000164 177776 00000002562 15107057155 021771 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2008-2016 Codership Oy /** * @file system limit macros * * $Id$ */ #ifndef _gu_limits_h_ #define _gu_limits_h_ #include #ifdef __cplusplus extern "C" { #endif extern size_t gu_page_size(void); extern size_t gu_phys_pages(void); extern size_t gu_avphys_pages(void); #ifdef __cplusplus } // extern "C" #endif #define GU_PAGE_SIZE gu_page_size() /* returns multiple of page size that is no less than page size */ static inline size_t gu_page_size_multiple(size_t const requested_size) { size_t const sys_page_size = GU_PAGE_SIZE; size_t const multiple = requested_size / sys_page_size; return sys_page_size * (0 == multiple ? 1 : multiple); } static inline size_t gu_avphys_bytes() { // to detect overflow on systems with >4G of RAM, see #776 unsigned long long avphys = gu_avphys_pages(); avphys *= gu_page_size(); size_t max = -1; return (avphys < max ? 
avphys : max); } #include #define GU_ULONG_MAX ULONG_MAX #define GU_LONG_MAX LONG_MAX #define GU_LONG_MIN LONG_MIN #ifdef ULLONG_MAX #define GU_ULLONG_MAX ULLONG_MAX #define GU_LLONG_MAX LLONG_MAX #define GU_LLONG_MIN LLONG_MIN #else #define GU_ULLONG_MAX 0xffffffffffffffffULL #define GU_LLONG_MAX 0x7fffffffffffffffLL #define GU_LLONG_MIN (-GU_LONG_LONG_MAX - 1) #endif #define GU_MIN_ALIGNMENT 8 #endif /* _gu_limits_h_ */ galera-4-26.4.25/galerautils/src/gu_gtid.hpp000644 000164 177776 00000006671 15107057155 021764 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015-2017 Codership Oy */ #ifndef _gu_gtid_hpp_ #define _gu_gtid_hpp_ #include "gu_uuid.hpp" #include "gu_serialize.hpp" #include "gu_hash.h" #include namespace gu { class GTID; typedef int64_t seqno_t; } /* namespace gu */ class gu::GTID { public: static seqno_t const SEQNO_UNDEFINED = -1; GTID() : uuid_(), seqno_(SEQNO_UNDEFINED) {} GTID(const UUID& u, seqno_t s) : uuid_(u), seqno_(s) {} GTID(const gu_uuid_t& u, seqno_t s) : uuid_(u), seqno_(s) {} GTID(const GTID& g) : uuid_(g.uuid_), seqno_(g.seqno_) {} GTID(const void* const buf, size_t const buflen) : uuid_ (), seqno_(SEQNO_UNDEFINED) { (void) unserialize(buf, buflen, 0); } // this constuftor modifies offset GTID(const void* const buf, size_t const buflen, size_t& offset) : uuid_ (), seqno_(SEQNO_UNDEFINED) { offset = unserialize(buf, buflen, offset); } GTID& operator=(const GTID& other) = default; const UUID& uuid() const { return uuid_; } seqno_t seqno() const { return seqno_; } void set(const gu::UUID& u) { uuid_ = u; } void set(seqno_t const s) { seqno_ = s; } void set(const gu::UUID& u, seqno_t const s) { set(u); set(s); } bool operator==(const GTID& other) const { return (seqno_ == other.seqno_ && uuid_ == other.uuid_); } bool operator!=(const GTID& other) const { return !(*this == other); } bool is_undefined() const { static GTID undefined; return *this == undefined; } void print(std::ostream& os) const; void scan(std::istream& is); 
static size_t serial_size() { return UUID::serial_size() +sizeof(int64_t); } size_t serialize(void* const buf, size_t offset) const { assert(serial_size() == (uuid_.serial_size() + sizeof(int64_t))); offset = uuid_.serialize(buf, offset); offset = gu::serialize8(seqno_, buf, offset); return offset; } size_t unserialize(const void* const buf, size_t offset) { assert(serial_size() == (uuid_.serial_size() + sizeof(seqno_))); offset = uuid_.unserialize(buf, offset); offset = gu::unserialize8(buf, offset, seqno_); return offset; } size_t unserialize(const void* const buf, const size_t buflen, const size_t offset) { gu_trace(gu::check_bounds(offset + serial_size(), buflen)); return unserialize(buf, offset); } size_t serialize (void* const buf, const size_t buflen, const size_t offset) const { gu_trace(gu::check_bounds(offset + serial_size(), buflen)); return serialize(buf, offset); } class TableHash // for std::map, does not have to be endian independent { public: size_t operator()(const GTID& gtid) const { // UUID is 16 bytes and seqno_t is 8 bytes so all should be // properly aligned into a continuous buffer return gu_table_hash(>id, sizeof(UUID) + sizeof(seqno_t)); } }; private: UUID uuid_; seqno_t seqno_; }; /* class GTID */ namespace gu { inline std::ostream& operator<< (std::ostream& os, const GTID& gtid) { gtid.print(os); return os; } inline std::istream& operator>> (std::istream& is, GTID& gtid) { gtid.scan(is); return is; } } /* namespace gu */ #endif /* _gu_gtid_hpp_ */ galera-4-26.4.25/galerautils/src/gu_asio_socket_util.hpp000644 000164 177776 00000010455 15107057155 024370 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // /** @gile gu_asio_socket_util.hpp * * Common utility functions for asio sockets. */ #ifndef GU_ASIO_SOCKET_UTIL_HPP #define GU_ASIO_SOCKET_UTIL_HPP #include "gu_throw.hpp" #ifndef GU_ASIO_IMPL #error This header should not be included directly. 
#endif // GU_ASIO_IMPL #include "gu_asio_ip_address_impl.hpp" #include "asio/ip/tcp.hpp" #include "asio/version.hpp" template int native_socket_handle(S& socket) { #if ASIO_VERSION < 101401 return socket.native(); #else return socket.native_handle(); #endif } template static void set_fd_options(S& socket) { long flags(FD_CLOEXEC); if (fcntl(native_socket_handle(socket), F_SETFD, flags) == -1) { gu_throw_system_error(errno) << "failed to set FD_CLOEXEC"; } } template static void set_socket_options(Socket& socket) { set_fd_options(socket); socket.set_option(asio::ip::tcp::no_delay(true)); } template static void set_receive_buffer_size(Socket& socket, size_t size) { try { socket.set_option(asio::socket_base::receive_buffer_size(size)); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to set receive buffer size: " << e.what(); } } template static size_t get_receive_buffer_size(Socket& socket) { try { asio::socket_base::receive_buffer_size option; socket.get_option(option); return option.value(); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to get receive buffer size: " << e.what(); } } template static void set_send_buffer_size(Socket& socket, size_t size) { try { socket.set_option(asio::socket_base::send_buffer_size(size)); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to set send buffer size: " << e.what(); } } template static size_t get_send_buffer_size(Socket& socket) { try { asio::socket_base::send_buffer_size option; socket.get_option(option); return option.value(); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to get send buffer size: " << e.what(); } } static inline asio::ip::tcp::resolver::iterator resolve_tcp( asio::io_service& io_service, const gu::URI& uri) { asio::ip::tcp::resolver resolver(io_service); // Give query flags explicitly to avoid having AI_ADDRCONFIG in // underlying 
getaddrinfo() hint flags. asio::ip::tcp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port(), asio::ip::tcp::resolver::query::flags(0)); return resolver.resolve(query); } template static void bind(Socket& socket, const gu::AsioIpAddress& addr) { try { asio::ip::tcp::endpoint endpoint(addr.impl().native(), 0); socket.bind(endpoint); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed bind socket to address: " << e.what(); } } template static struct tcp_info get_tcp_info(Socket& socket) { struct tcp_info tcpi; memset(&tcpi, 0, sizeof(tcpi)); #if defined(__linux__) || defined(__FreeBSD__) #if defined(__linux__) static int const level(SOL_TCP); #else /* FreeBSD */ static int const level(IPPROTO_TCP); #endif socklen_t tcpi_len(sizeof(tcpi)); int native_fd(native_socket_handle(socket)); if (getsockopt(native_fd, level, TCP_INFO, &tcpi, &tcpi_len)) { int err(errno); gu_throw_system_error(err) << "Failed to read TCP info from socket: " << strerror(err); } #endif /* __linux__ || __FreeBSD__ */ return tcpi; } static inline std::string uri_string (const std::string& scheme, const std::string& addr, const std::string& port = std::string("")) { if (port.length() > 0) return (scheme + "://" + addr + ':' + port); else return (scheme + "://" + addr); } #endif // GU_ASIO_SOCKET_UTIL_HPP galera-4-26.4.25/galerautils/src/gu_fifo.h000644 000164 177776 00000005152 15107057155 021411 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * Queue (FIFO) class definition * * The driving idea behind this class is avoiding malloc()'s * at all costs on one hand, on the other - make it almost * as infinite as an ordinary linked list. FIFO properties * help to achieve that. * * When needed this FIFO can be made very big, holding * millions or even billions of items while taking up * minimum space when there are few items in the queue. 
* malloc()'s do happen, but once per thousand of pushes and * allocate multiples of pages, thus reducing memory fragmentation. */ #ifndef _gu_fifo_h_ #define _gu_fifo_h_ #include typedef struct gu_fifo gu_fifo_t; /*! constructor */ extern gu_fifo_t* gu_fifo_create (size_t length, size_t unit); /*! puts FIFO into closed state, waking up waiting threads */ extern void gu_fifo_close (gu_fifo_t *queue); /*! (re)opens FIFO */ extern void gu_fifo_open (gu_fifo_t *queue); /*! destructor - would block until all members are dequeued */ extern void gu_fifo_destroy (gu_fifo_t *queue); /*! for logging purposes */ extern char* gu_fifo_print (gu_fifo_t *queue); /*! Lock FIFO */ extern void gu_fifo_lock (gu_fifo_t *q); /*! Release FIFO */ extern void gu_fifo_release (gu_fifo_t *q); /*! Lock FIFO and get pointer to head item * @param err contains error code if retval is NULL (otherwise - undefined): -ENODATA - queue closed, -ECANCELED - gets were canceled on the queue * @retval pointer to head item or NULL if error occured */ extern void* gu_fifo_get_head (gu_fifo_t* q, int* err); /*! Advance FIFO head pointer and release FIFO. */ extern void gu_fifo_pop_head (gu_fifo_t* q); /*! Lock FIFO and get pointer to tail item */ extern void* gu_fifo_get_tail (gu_fifo_t* q); /*! Advance FIFO tail pointer and release FIFO. */ extern void gu_fifo_push_tail (gu_fifo_t* q); /*! Return how many items are in the queue (unprotected) */ extern long gu_fifo_length (gu_fifo_t* q); /*! Return how many items were in the queue on average per push_tail() */ extern void gu_fifo_stats_get (gu_fifo_t* q, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg); /*! Flush stats counters */ extern void gu_fifo_stats_flush(gu_fifo_t* q); /*! Clear contents of the queue */ extern void gu_fifo_clear(gu_fifo_t* q); /*! Cancel getters (must be called while holding a FIFO lock) */ extern int gu_fifo_cancel_gets (gu_fifo_t* q); /*! 
Resume get operations */ extern int gu_fifo_resume_gets (gu_fifo_t* q); #ifndef NDEBUG extern bool gu_fifo_locked (gu_fifo_t* q); #endif #endif // _gu_fifo_h_ galera-4-26.4.25/galerautils/src/gu_asio_datagram.cpp000644 000164 177776 00000017377 15107057155 023630 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020-2025 Codership Oy // #define GU_ASIO_IMPL #include "gu_asio_datagram.hpp" #include "gu_asio_error_category.hpp" #include "gu_asio_io_service_impl.hpp" #include "gu_asio_ip_address_impl.hpp" #include "gu_asio_utils.hpp" #include "gu_asio_socket_util.hpp" #ifndef ASIO_HAS_BOOST_BIND #define ASIO_HAS_BOOST_BIND #endif // ASIO_HAS_BOOST_BIND #include "asio/ip/multicast.hpp" #include "asio/placeholders.hpp" #include static asio::ip::udp::resolver::iterator resolve_udp( asio::io_service& io_service, const gu::URI& uri) { asio::ip::udp::resolver resolver(io_service); asio::ip::udp::resolver::query query(gu::unescape_addr(uri.get_host()), uri.get_port()); return resolver.resolve(query); } static bool is_multicast(const asio::ip::udp::endpoint& ep) { if (ep.address().is_v4() == true) { return ep.address().to_v4().is_multicast(); } else if (ep.address().is_v6() == true) { return ep.address().to_v6().is_multicast(); } gu_throw_fatal; } static void join_group(asio::ip::udp::socket& socket, const asio::ip::udp::endpoint& ep, const asio::ip::address& local_if) { assert(is_multicast(ep) == true); if (ep.address().is_v4() == true) { socket.set_option(asio::ip::multicast::join_group( ep.address().to_v4(), local_if.to_v4())); socket.set_option(asio::ip::multicast::outbound_interface( local_if.to_v4())); } else { gu_throw_fatal << "mcast interface not implemented for IPv6"; socket.set_option(asio::ip::multicast::join_group(ep.address().to_v6())); } } static void leave_group(asio::ip::udp::socket& socket, const asio::ip::udp::endpoint& ep, const asio::ip::address& local_if) { // @todo This was commented out in the original code. 
assert(is_multicast(ep) == true); try { socket.set_option(asio::ip::multicast::leave_group( ep.address().to_v4(), local_if.to_v4())); } catch (const asio::system_error& e) { // @todo Exception is caught here if socket.if_addr option // is given when connecting to multicast group. log_warn << "Caught error while leaving multicast group: " << e.what() << ": " << ep.address(); assert(0); } } gu::AsioUdpSocket::AsioUdpSocket(gu::AsioIoService& io_service) : io_service_(io_service) , socket_(io_service.impl().native()) , local_endpoint_() , local_if_() { } gu::AsioUdpSocket::~AsioUdpSocket() noexcept(false) { close(); } asio::ip::udp::resolver::iterator gu::AsioUdpSocket::resolve_and_open(const gu::URI& uri) { try { auto resolve_result(resolve_udp(io_service_.impl().native(), uri)); socket_.open(resolve_result->endpoint().protocol()); set_fd_options(socket_); return resolve_result; } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error opening datagram socket" << uri; } } void gu::AsioUdpSocket::open(const gu::URI& uri) { try { resolve_and_open(uri); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "error opening datagram socket" << uri; } } void gu::AsioUdpSocket::close() { if (socket_.is_open()) { if (is_multicast(socket_.local_endpoint())) { leave_group(socket_, socket_.local_endpoint(), local_if_); } socket_.close(); } } void gu::AsioUdpSocket::connect(const gu::URI& uri) { try { asio::ip::udp::resolver::iterator resolve_result; if (not socket_.is_open()) { resolve_result = resolve_and_open(uri); } else { resolve_result = resolve_udp(io_service_.impl().native(), uri); } socket_.set_option(asio::ip::udp::socket::reuse_address(true)); socket_.set_option(asio::ip::udp::socket::linger(true, 1)); #if ASIO_VERSION < 101400 asio::ip::udp::socket::non_blocking_io non_blocking(true); socket_.io_control(non_blocking); #else socket_.non_blocking(true); #endif local_if_ = ::make_address( 
uri.get_option("socket.if_addr", ::any_addr( resolve_result->endpoint().address()))); if (is_multicast(resolve_result->endpoint())) { join_group(socket_, resolve_result->endpoint(), local_if_); socket_.set_option( asio::ip::multicast::enable_loopback( gu::from_string(uri.get_option("socket.if_loop", "false")))); socket_.set_option( asio::ip::multicast::hops( gu::from_string(uri.get_option("socket.mcast_ttl", "1")))); socket_.bind(*resolve_result); } else { socket_.bind( asio::ip::udp::endpoint( local_if_, gu::from_string(uri.get_port()))); } local_endpoint_ = socket_.local_endpoint(); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to connect UDP socket: " << e.what(); } } void gu::AsioUdpSocket::write( const std::array& buffers) try { std::array asio_bufs; asio_bufs[0] = asio::const_buffer(buffers[0].data(),buffers[0].size()); asio_bufs[1] = asio::const_buffer(buffers[1].data(),buffers[1].size()); socket_.send_to(asio_bufs, local_endpoint_); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to write UDP socket: " << e.what(); } void gu::AsioUdpSocket::send_to( const std::array& buffers, const AsioIpAddress& target_host, unsigned short target_port) { std::array asio_bufs; asio_bufs[0] = asio::const_buffer(buffers[0].data(), buffers[0].size()); asio_bufs[1] = asio::const_buffer(buffers[1].data(), buffers[1].size()); asio::ip::udp::endpoint target_endpoint( target_host.impl().native(), target_port); try { socket_.send_to(asio_bufs, target_endpoint); } catch (const asio::system_error& e) { gu_throw_system_error(e.code().value()) << "Failed to send datagram to " << target_endpoint << ": " << e.what(); } } void gu::AsioUdpSocket::async_read( const AsioMutableBuffer& buffer, const std::shared_ptr& handler) { socket_.async_receive(asio::buffer(buffer.data(), buffer.size()), boost::bind(&AsioUdpSocket::read_handler, shared_from_this(), handler, asio::placeholders::error, 
asio::placeholders::bytes_transferred)); } std::string gu::AsioUdpSocket::local_addr() const { return uri_string(gu::scheme::udp, ::escape_addr(socket_.local_endpoint().address()), gu::to_string(socket_.local_endpoint().port())); } // Async handlers void gu::AsioUdpSocket::read_handler( const std::shared_ptr& handler, const asio::error_code& ec, size_t bytes_transferred) { handler->read_handler(*this, AsioErrorCode(ec.value(), ec.category()), bytes_transferred); } galera-4-26.4.25/galerautils/src/gu_dbug.h000644 000164 177776 00000014532 15107057155 021411 0ustar00jenkinsnogroup000000 000000 /****************************************************************************** * * * N O T I C E * * * * Copyright Abandoned, 1987, Fred Fish * * * * * * This previously copyrighted work has been placed into the public * * domain by the author and may be freely used for any purpose, * * private or commercial. * * * * Because of the number of inquiries I was receiving about the use * * of this product in commercially developed works I have decided to * * simply make it public domain to further its unrestricted use. I * * specifically would be most happy to see this material become a * * part of the standard Unix distributions by AT&T and the Berkeley * * Computer Science Research Group, and a standard part of the GNU * * system from the Free Software Foundation. * * * * I would appreciate it, as a courtesy, if this notice is left in * * all copies and derivative works. Thank you. * * * * The author makes no warranty of any kind with respect to this * * product and explicitly disclaims any implied warranties of mer- * * chantability or fitness for any particular purpose. * * * ****************************************************************************** */ /* * FILE * * dbug.c runtime support routines for dbug package * * SCCS * * @(#)dbug.c 1.25 7/25/89 * * DESCRIPTION * * These are the runtime support routines for the dbug package. 
* The dbug package has two main components; the user include * file containing various macro definitions, and the runtime * support routines which are called from the macro expansions. * * Externally visible functions in the runtime support module * use the naming convention pattern "_db_xx...xx_", thus * they are unlikely to collide with user defined function names. * * AUTHOR(S) * * Fred Fish (base code) * Enhanced Software Technologies, Tempe, AZ * asuvax!mcdphx!estinc!fnf * * Binayak Banerjee (profiling enhancements) * seismo!bpa!sjuvax!bbanerje * * Michael Widenius: * DBUG_DUMP - To dump a pice of memory. * PUSH_FLAG "O" - To be used instead of "o" if we don't * want flushing (for slow systems) * PUSH_FLAG "A" - as 'O', but we will append to the out file instead * of creating a new one. * Check of malloc on entry/exit (option "S") * * Alexey Yurchenko: * Renamed global symbols for use with galera project to avoid * collisions with other software (notably MySQL) * * $Id$ */ #ifndef _dbug_h #define _dbug_h #include #include typedef unsigned int uint; typedef unsigned long ulong; #define THREAD 1 #ifdef __cplusplus extern "C" { #endif extern char _gu_dig_vec[]; extern FILE* _gu_db_fp_; #define GU_DBUG_FILE _gu_db_fp_ #if defined(GU_DBUG_ON) && !defined(_lint) extern int _gu_db_on_; extern int _gu_no_db_; extern char* _gu_db_process_; extern int _gu_db_keyword_(const char* keyword); extern void _gu_db_setjmp_ (void); extern void _gu_db_longjmp_(void); extern void _gu_db_push_ (const char* control); extern void _gu_db_pop_ (void); extern void _gu_db_enter_ (const char* _func_, const char* _file_, uint _line_, const char** _sfunc_, const char** _sfile_, uint* _slevel_, char***); extern void _gu_db_return_ (uint _line_, const char** _sfunc_, const char** _sfile_, uint* _slevel_); extern void _gu_db_pargs_ (uint _line_, const char* keyword); extern void _gu_db_doprnt_ (const char* format, ...); extern void _gu_db_dump_ (uint _line_, const char *keyword, const char 
*memory, uint length); extern void _gu_db_lock_file (void); extern void _gu_db_unlock_file(void); #define GU_DBUG_ENTER(a) \ const char *_gu_db_func_, *_gu_db_file_; \ uint _gu_db_level_; \ char **_gu_db_framep_; \ _gu_db_enter_ (a, __FILE__, __LINE__, &_gu_db_func_, &_gu_db_file_, \ &_gu_db_level_, &_gu_db_framep_) #define GU_DBUG_LEAVE \ (_gu_db_return_ (__LINE__, &_gu_db_func_, &_gu_db_file_, \ &_gu_db_level_)) #define GU_DBUG_RETURN(a1) {GU_DBUG_LEAVE; return(a1);} #define GU_DBUG_VOID_RETURN {GU_DBUG_LEAVE; return; } #define GU_DBUG_EXECUTE(keyword,a1) \ {if (_gu_db_on_) {if (_gu_db_keyword_ (keyword)) { a1 }}} #define GU_DBUG_PRINT(keyword,arglist) \ {if (_gu_db_on_) {_gu_db_pargs_(__LINE__,keyword); \ _gu_db_doprnt_ arglist;}} #define GU_DBUG_PUSH(a1) _gu_db_push_ (a1) #define GU_DBUG_POP() _gu_db_pop_ () #define GU_DBUG_PROCESS(a1) (_gu_db_process_ = a1) #define GU_DBUG_SETJMP(a1) (_gu_db_setjmp_ (), setjmp (a1)) #define GU_DBUG_LONGJMP(a1,a2) (_gu_db_longjmp_ (), longjmp (a1, a2)) #define GU_DBUG_DUMP(keyword,a1,a2)\ {if (_gu_db_on_) {_gu_db_dump_(__LINE__,keyword,a1,a2);}} #define GU_DBUG_IN_USE (_gu_db_fp_ && _gu_db_fp_ != stderr) #define GU_DEBUGGER_OFF _no_gu_db_=1;_gu_db_on_=0; #define GU_DEBUGGER_ON _no_gu_db_=0 #define GU_DBUG_my_pthread_mutex_lock_FILE { _gu_db_lock_file(); } #define GU_DBUG_my_pthread_mutex_unlock_FILE { _gu_db_unlock_file(); } #define GU_DBUG_ASSERT(A) assert(A) #else /* No debugger */ #define GU_DBUG_ENTER(a1) #define GU_DBUG_RETURN(a1) return(a1) #define GU_DBUG_VOID_RETURN return #define GU_DBUG_EXECUTE(keyword,a1) {} #define GU_DBUG_PRINT(keyword,arglist) {} #define GU_DBUG_PUSH(a1) {} #define GU_DBUG_POP() {} #define GU_DBUG_PROCESS(a1) {} #define GU_DBUG_SETJMP setjmp #define GU_DBUG_LONGJMP longjmp #define GU_DBUG_DUMP(keyword,a1,a2) {} #define GU_DBUG_IN_USE 0 #define GU_DEBUGGER_OFF #define GU_DEBUGGER_ON #define GU_DBUG_my_pthread_mutex_lock_FILE #define GU_DBUG_my_pthread_mutex_unlock_FILE #define GU_DBUG_ASSERT(A) {} 
#endif #ifdef __cplusplus } #endif #endif galera-4-26.4.25/galerautils/src/gu_to.h000644 000164 177776 00000010410 15107057155 021101 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! * @file gu_to.h Public TO monitor API */ #ifndef _gu_to_h_ #define _gu_to_h_ #ifdef __cplusplus extern "C" { #endif #include #include #include /*! @typedef @brief Sequence number type. */ typedef int64_t gu_seqno_t; /*! Total Order object */ typedef struct gu_to gu_to_t; /*! @brief Creates TO object. * TO object can be used to serialize access to application * critical section using sequence number. * * @param len A length of the waiting queue. Should be no less than the * possible maximum number of threads competing for the resource, * but should not be too high either. Perhaps 1024 is good enough * for most applications. * @param seqno A starting sequence number * (the first to be used by gu_to_grab()). * @return Pointer to TO object or NULL in case of error. */ extern gu_to_t* gu_to_create (int len, gu_seqno_t seqno); /*! @brief Destroys TO object. * * @param to A pointer to TO object to be destroyed * @return 0 in case of success, negative code in case of error. * In particular -EBUSY means the object is used by other threads. */ extern long gu_to_destroy (gu_to_t** to); /*! @brief Grabs TO resource in the specified order. * On successful return the mutex associated with specified TO is locked. * Must be released gu_to_release(). @see gu_to_release * * @param to TO resource to be acquired. * @param seqno The order at which TO resource should be acquired. For any N * gu_to_grab (to, N) will return exactly after * gu_to_release (to, N-1). * @return 0 in case of success, negative code in case of error. * -EAGAIN means that there are too many threads waiting for TO * already. It is safe to try again later. 
* -ECANCEL if waiter was canceled, seqno is skipped in TO * -EINTR if wait was interrupted, must retry grabbing later */ extern long gu_to_grab (gu_to_t* to, gu_seqno_t seqno); /*! @brief Releases TO specified resource. * On successful return unlocks the mutex associated with TO. * TO must be previously acquired with gu_to_grab(). @see gu_to_grab * * @param to TO resource that was previously acquired with gu_to_grab(). * @param seqno The same number with which gu_to_grab() was called. * @return 0 in case of success, negative code in case of error. Any error * here is an application error - attempt to release TO resource * out of order (not paired with gu_to_grab()). */ extern long gu_to_release (gu_to_t* to, gu_seqno_t seqno); /*! @brief The last sequence number that had been used to access TO object. * Note that since no locks are held, it is a conservative estimation. * It is guaranteed however that returned seqno is no longer in use. * * @param to A pointer to TO object. * @return GCS sequence number. Since GCS TO sequence starts with 1, this * sequence can start with 0. */ extern gu_seqno_t gu_to_seqno (gu_to_t* to); /*! @brief cancels a TO monitor waiter making it return immediately * It is assumed that the caller is currenly holding the TO. * The to-be-cancelled waiter can be some later transaction but also * some earlier transaction. Tests have shown that the latter case * can also happen. * * @param to A pointer to TO object. * @param seqno Seqno of the waiter object to be cancelled * @return 0 for success and -ERANGE, if trying to cancel an earlier * transaction */ extern long gu_to_cancel (gu_to_t *to, gu_seqno_t seqno); /*! * Self cancel to without attempting to enter critical section */ extern long gu_to_self_cancel(gu_to_t *to, gu_seqno_t seqno); /*! @brief interrupts from TO monitor waiting state. * Seqno remains valid in the queue and later seqnos still need to * wait for this seqno to be released. 
* * The caller can (and must) later try gu_to_grab() again or cancel * the seqno with gu_to_self_cancel(). * * @param to A pointer to TO object. * @param seqno Seqno of the waiter object to be interrupted * @return 0 for success and -ERANGE, if trying to interrupt an already * used transaction */ extern long gu_to_interrupt (gu_to_t *to, gu_seqno_t seqno); #ifdef __cplusplus } #endif #endif // _gu_to_h_ galera-4-26.4.25/galerautils/src/gu_atomic.hpp000644 000164 177776 00000003435 15107057155 022304 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2010-2020 Codership Oy // // // @todo Check that the at least the following gcc versions are supported // gcc version 4.1.2 20080704 (Red Hat 4.1.2-48) // #ifndef GU_ATOMIC_HPP #define GU_ATOMIC_HPP #include "gu_atomic.h" #include namespace gu { template class Atomic { public: Atomic(I i = 0) : i_(i) { } I operator()() const { I i; gu_atomic_get(&i_, &i); return i; } Atomic& operator=(I i) { gu_atomic_set(&i_, &i); return *this; } I fetch_and_zero() { return gu_atomic_fetch_and_and(&i_, 0); } I fetch_and_add(I i) { return gu_atomic_fetch_and_add(&i_, i); } I add_and_fetch(I i) { return gu_atomic_add_and_fetch(&i_, i); } I sub_and_fetch(I i) { return gu_atomic_sub_and_fetch(&i_, i); } Atomic& operator++() { gu_atomic_fetch_and_add(&i_, 1); return *this; } Atomic& operator--() { gu_atomic_fetch_and_sub(&i_, 1); return *this; } Atomic& operator+=(I i) { gu_atomic_fetch_and_add(&i_, i); return *this; } bool operator!=(I i) { return (operator()() != i); } bool operator==(I i) { return (!operator!=(i)); } private: #if !defined(__ATOMIC_RELAXED) // implementation of gu_atomic_get() via __sync_fetch_and_or() // is not read-only for GCC mutable #endif I i_; }; } #endif // ::GU_ATOMIC_HPP galera-4-26.4.25/galerautils/src/gu_mem.h000644 000164 177776 00000004004 15107057155 021237 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007 Codership Oy /** * @file * Declarations of memory allocation functions and macros * * 
$Id$ */ #ifndef _gu_mem_h_ #define _gu_mem_h_ #include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /** @name Functions to help with dynamic allocation debugging. * Take additional __FILE__ and __LINE__ arguments. Should be * used as part of macros defined below */ /*@{*/ void* gu_malloc_dbg (size_t size, const char* file, unsigned int line); void* gu_calloc_dbg (size_t nmemb, size_t size, const char* file, unsigned int line); void* gu_realloc_dbg (void* ptr, size_t size, const char* file, unsigned int line); void gu_free_dbg (void* ptr, const char* file, unsigned int line); /*@}*/ /** Reports statistics on the current amount of allocated memory * total number of allocations and deallocations */ void gu_mem_stats (ssize_t* total, ssize_t* allocs, ssize_t* reallocs, ssize_t* deallocs); /** @name Applications should use the following macros */ /*@{*/ #ifdef DEBUG_MALLOC #define gu_malloc(S) gu_malloc_dbg ((S), __FILE__, __LINE__) #define gu_calloc(N,S) gu_calloc_dbg ((N), (S), __FILE__, __LINE__) #define gu_realloc(P,S) gu_realloc_dbg ((P), (S), __FILE__, __LINE__) #define gu_free(P) gu_free_dbg ((P), __FILE__, __LINE__) #else /* !DEBUG_MALLOC - use standard allocation routines */ #define gu_malloc(S) malloc ((S)) #define gu_calloc(N,S) calloc ((N), (S)) #define gu_realloc(P,S) realloc ((P), (S)) #define gu_free(P) free ((P)) #endif /* DEBUG_MALLOC */ /** Convenience macros - to avoid code clutter */ #define GU_MALLOC(type) (type*) gu_malloc (sizeof(type)) #define GU_MALLOCN(N,type) (type*) gu_malloc ((N) * sizeof(type)) #define GU_CALLOC(N,type) (type*) gu_calloc ((N), sizeof(type)) #define GU_REALLOC(P,N,type) (type*) gu_realloc((P), (N) * sizeof(type)) /*@}*/ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_mem_h_ */ galera-4-26.4.25/galerautils/src/gu_compiler.hpp000644 000164 177776 00000000567 15107057155 022645 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2020 Codership Oy */ /** @file gu_compiler.hpp * * Compiler 
specific workarounds. * */ #ifndef GU_COMPILER_HPP #define GU_COMPILER_HPP #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) #define GALERA_OVERRIDE #else #define GALERA_OVERRIDE override #endif /* #if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) */ #endif /* GU_COMPILER_HPP */ galera-4-26.4.25/galerautils/src/gu_abort.c000644 000164 177776 00000001755 15107057155 021575 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2011-2013 Codership Oy /** * @file Clean abort function * * $Id$ */ #define _GNU_SOURCE #include "gu_abort.h" #include "gu_system.h" #include "gu_log.h" #include /* for setrlimit() */ #include /* for signal() */ #include /* for abort() */ #ifdef __linux__ #include /* for prctl() */ #endif /* __linux__ */ void gu_abort (void) { /* avoid coredump */ struct rlimit core_limits = { 0, 0 }; setrlimit (RLIMIT_CORE, &core_limits); #ifdef __linux__ /* Linux with its coredump piping option requires additional care. * See e.g. https://patchwork.kernel.org/patch/1091782/ */ prctl(PR_SET_DUMPABLE, 0, 0, 0, 0); #endif /* __linux__ */ /* restore default SIGABRT handler */ signal (SIGABRT, SIG_DFL); #if defined(GU_SYS_PROGRAM_NAME) gu_info ("%s: Terminated.", GU_SYS_PROGRAM_NAME); #else gu_info ("Program terminated."); #endif abort(); } galera-4-26.4.25/galerautils/src/gu_utils.hpp000644 000164 177776 00000012120 15107057155 022157 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009-2017 Codership Oy /** * @file General-purpose functions and templates * * $Id$ */ #ifndef _gu_utils_hpp_ #define _gu_utils_hpp_ #include #include #include #include #include "gu_exception.hpp" #include "gu_types.hpp" namespace gu { /* * String conversion functions for primitive types */ /*! Generic to_string() template function */ template inline std::string to_string(const T& x, std::ios_base& (*f)(std::ios_base&) = std::dec) { std::ostringstream out; out << std::showbase << f << x; return out.str(); } /*! 
Specialized template: make bool translate into 'true' or 'false' */ template <> inline std::string to_string(const bool& x, std::ios_base& (*f)(std::ios_base&)) { std::ostringstream out; out << std::boolalpha << x; return out.str(); } /*! Specialized template: make double to print with full precision */ template <> inline std::string to_string(const double& x, std::ios_base& (*f)(std::ios_base&)) { const int sigdigits = std::numeric_limits::digits10; // or perhaps std::numeric_limits::max_digits10? std::ostringstream out; out << std::setprecision(sigdigits) << x; return out.str(); } /*! Generic from_string() template. Default base is decimal. * @throws NotFound */ template inline T from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&) = std::dec) { std::istringstream iss(s); T ret; try { iss >> f >> ret; if (iss.fail() || not iss.eof()) { throw NotFound(); } } catch (gu::Exception& e) { throw NotFound(); } return ret; } /*! Specialized template for reading strings. This is to avoid throwing * NotFound in case of empty string. */ template <> inline std::string from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&)) { return s; } /*! Specialized template for reading pointers. Default base is hex. * @throws NotFound */ template <> inline void* from_string(const std::string& s, std::ios_base& (*f)(std::ios_base&)) { std::istringstream iss(s); void* ret; iss >> std::hex >> ret; if (iss.fail() || not iss.eof()) { throw NotFound(); } return ret; } extern "C" const char* gu_str2bool (const char* str, bool* bl); /*! Specialized template for reading bool. Tries both 1|0 and true|false * @throws NotFound */ template <> inline bool from_string (const std::string& s, std::ios_base& (*f)(std::ios_base&)) { bool ret; const char* const str(s.c_str()); const char* const endptr(gu_str2bool(str, &ret)); if (endptr == str || endptr == 0 || *endptr != '\0') throw NotFound(); return ret; } /*! 
* Substitute for the Variable Length Array on the stack from C99. * Provides automatic deallocation when out of scope: * * void foo(size_t n) { VLA bar(n); bar[0] = 5; throw; } */ template class VLA { T* array; VLA (const VLA&); VLA& operator= (const VLA&); public: VLA (size_t n) : array(new T[n]) {} ~VLA () { delete[] array; } T* operator& () { return array; } T& operator[] (size_t i) { return array[i]; } }; /*! * Object deletion operator. Convenient with STL containers containing * pointers. Example: * * @code * void cleanup() * { * for_each(container.begin(), container.end(), DeleteObject()); * container.clear(); * } * * @endcode */ class DeleteObject { public: template void operator()(T* t) { delete t; } }; typedef std::ios_base& (*base_t) (std::ios_base& str); template class PrintBase { public: explicit PrintBase(T t) : val_(t) {} void print(std::ostream& os) const { using namespace std; ios_base::fmtflags const old_flags(os.flags()); char const old_fill (os.fill()); int width(sizeof(T) * 2); // default hex width if (base == oct) width = width * 1.333334 + 0.5; if (prefix) os << showbase; os << internal << base << setfill('0') << setw(width) << val_; os.flags(old_flags); os.fill(old_fill); } private: T const val_; }; template std::ostream& operator << (std::ostream& os, const PrintBase& b) { b.print(os); return os; } /*! template to do arithmetics on void and byte pointers, compiler will * catch anything else. * @return input type */ template inline T* ptr_offset(T* ptr, PtrOffsetType i) { return static_cast(ptr) + i; } template inline const T* ptr_offset(const T* ptr, PtrOffsetType i) { return static_cast(ptr)+i; } } // namespace gu #endif /* _gu_utils_hpp_ */ galera-4-26.4.25/galerautils/src/gu_rand.c000644 000164 177776 00000002124 15107057155 021401 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2015 Codership Oy /** * @file routines to generate "random" seeds for RNGs by collecting some easy * entropy. 
* * gu_rand_seed_long() goes for srand48() * * gu_rand_seed_int() goes for srand() and rand_r() * * $Id$ */ #include "gu_rand.h" #include "gu_hash.h" /*! Structure to hold entropy data. * Should be at least 20 bytes on 32-bit systems and 28 bytes on 64-bit */ /* Packed to avoid uninitialized data warnings when passed to hash */ struct gu_rse { long long time; const void* heap_ptr; const void* stack_ptr; long pid; }__attribute__((packed)); typedef struct gu_rse gu_rse_t; long int gu_rand_seed_long (long long time, const void* heap_ptr, pid_t pid) { gu_rse_t rse = { time, heap_ptr, &time, pid }; return gu_fast_hash64_medium (&rse, sizeof(rse)); } #if GU_WORDSIZE == 32 unsigned int gu_rand_seed_int (long long time, const void* heap_ptr, pid_t pid) { gu_rse_t rse = { time, heap_ptr, &time, pid }; return gu_fast_hash32_short (&rse, sizeof(rse)); } #endif /* GU_WORDSIZE == 32 */ galera-4-26.4.25/galerautils/src/gu_hexdump.h000644 000164 177776 00000001776 15107057155 022150 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012-2013 Codership Oy /** * @file Functions to dump binary buffer contents in a readable form * * $Id$ */ #ifndef _gu_hexdump_h_ #define _gu_hexdump_h_ #include "gu_types.h" #ifdef __cplusplus extern "C" { #endif /* This makes it 32*2 + 7 spaces = 71 character per line - just short of 80 */ #define GU_HEXDUMP_BYTES_PER_LINE 32 /*! Dumps contents of the binary buffer in a readable form to a 0-terminated * string of length not exeeding str_size - 1 * @param buf input binary buffer * @param but_size size of the input buffer * @param str target string buffer (will be always 0-terminated) * @param str_size string buffer size (including terminating 0) * @param alpha dump alphanumeric characters as they are, padded with '.' * (e.g. D.u.m.p.) 
*/ extern void gu_hexdump(const void* buf, ssize_t buf_size, char* str, ssize_t str_size, bool alpha); #ifdef __cplusplus } #endif #endif /* _gu_hexdump_h_ */ galera-4-26.4.25/galerautils/src/gu_int128.h000644 000164 177776 00000012523 15107057155 021513 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /** * @file 128-bit arithmetic macros. This is so far needed only for FNV128 * hash algorithm * * $Id$ */ #ifndef _gu_int128_h_ #define _gu_int128_h_ #include "gu_arch.h" #include "gu_byteswap.h" #include #if defined(__SIZEOF_INT128__) typedef int __attribute__((__mode__(__TI__))) int128_t; typedef unsigned int __attribute__((__mode__(__TI__))) uint128_t; typedef int128_t gu_int128_t; typedef uint128_t gu_uint128_t; #define GU_SET128(_a, hi64, lo64) _a = (((uint128_t)hi64) << 64) + lo64 #define GU_MUL128_INPLACE(_a, _b) _a *= _b #define GU_IMUL128_INPLACE(_a, _b) GU_MUL128_INPLACE(_a, _b) #define GU_EQ128(_a, _b) (_a == _b) #else /* Uncapable of 16-byte integer arythmetic */ #if defined(GU_LITTLE_ENDIAN) #define GU_64LO 0 #define GU_64HI 1 #define GU_32LO 0 #define GU_32HI 3 #define GU_32_0 0 #define GU_32_1 1 #define GU_32_2 2 #define GU_32_3 3 typedef union gu_int128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t lo; uint64_t mid; int32_t hi;}__attribute__((packed)) m; #ifdef __cplusplus gu_int128() : m() {} gu_int128(int64_t hi, uint64_t lo) : m() { u64[0] = lo; u64[1] = hi; } #endif } gu_int128_t; typedef union gu_uint128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t lo; uint64_t mid; uint32_t hi;}__attribute__((packed)) m; #ifdef __cplusplus gu_uint128() : m() {} gu_uint128(uint64_t hi, uint64_t lo) : m() { u64[0] = lo; u64[1] = hi; } #endif } gu_uint128_t; #ifdef __cplusplus #define GU_SET128(_a, hi64, lo64) _a = gu_uint128(hi64, lo64) #else #define GU_SET128(_a, hi64, lo64) _a = { .u64 = { lo64, hi64 } } #endif #define GU_MUL128_INPLACE(_a,_b) { \ uint64_t m00 = (uint64_t)(_a).u32[0] * (_b).u32[0]; \ uint64_t m10 = 
(uint64_t)(_a).u32[1] * (_b).u32[0]; \ uint64_t m20 = (uint64_t)(_a).u32[2] * (_b).u32[0]; \ uint64_t m01 = (uint64_t)(_a).u32[0] * (_b).u32[1]; \ uint64_t m02 = (uint64_t)(_a).u32[0] * (_b).u32[2]; \ uint64_t m11 = (uint64_t)(_a).u32[1] * (_b).u32[1]; \ uint32_t m30 = (_a).u32[3] * (_b).u32[0]; \ uint32_t m21 = (_a).u32[2] * (_b).u32[1]; \ uint32_t m12 = (_a).u32[1] * (_b).u32[2]; \ uint32_t m03 = (_a).u32[0] * (_b).u32[3]; \ (_a).u64[GU_64LO] = m00; (_a).u64[GU_64HI] = 0; \ (_a).m.mid += m10; (_a).m.hi += ((_a).m.mid < m10); \ (_a).m.mid += m01; (_a).m.hi += ((_a).m.mid < m01); \ (_a).u64[GU_64HI] += m20 + m11 + m02; \ (_a).u32[GU_32HI] += m30 + m21 + m12 + m03; \ } #else /* Big-Endian */ #define GU_64HI 0 #define GU_64LO 1 #define GU_32HI 0 #define GU_32LO 3 typedef union gu_int128 { uint64_t u64[2]; uint32_t u32[4]; struct {int32_t hi; uint64_t mid; uint32_t lo;}__attribute__((packed)) m; #ifdef __cplusplus gu_int128() {} gu_int128(int64_t hi, uint64_t lo) { u64[0] = hi; u64[1] = lo; } #endif } gu_int128_t; typedef union gu_uint128 { uint64_t u64[2]; uint32_t u32[4]; struct {uint32_t hi; uint64_t mid; uint32_t lo;}__attribute__((packed)) m; #ifdef __cplusplus gu_uint128() {} gu_uint128(uint64_t hi, uint64_t lo) { u64[0] = hi; u64[1] = lo; } #endif } gu_uint128_t; #ifdef __cplusplus #define GU_SET128(_a, hi64, lo64) _a = gu_uint128(hi64, lo64) #else #define GU_SET128(_a, hi64, lo64) _a = { .u64 = { hi64, lo64 } } #endif #define GU_MUL128_INPLACE(_a,_b) { \ uint64_t m33 = (uint64_t)_a.u32[3] * _b.u32[3]; \ uint64_t m23 = (uint64_t)_a.u32[2] * _b.u32[3]; \ uint64_t m13 = (uint64_t)_a.u32[1] * _b.u32[3]; \ uint64_t m32 = (uint64_t)_a.u32[3] * _b.u32[2]; \ uint64_t m31 = (uint64_t)_a.u32[3] * _b.u32[1]; \ uint64_t m22 = (uint64_t)_a.u32[2] * _b.u32[2]; \ uint32_t m30 = _a.u32[3] * _b.u32[0]; \ uint32_t m21 = _a.u32[2] * _b.u32[1]; \ uint32_t m12 = _a.u32[1] * _b.u32[2]; \ uint32_t m03 = _a.u32[0] * _b.u32[3]; \ _a.u64[GU_64LO] = m00; _a.u64[GU_64HI] = 0; \ _a.m.mid 
+= m23; _a.m.hi += (_a.m.mid < m23); \ _a.m.mid += m32; _a.m.hi += (_a.m.mid < m32); \ _a.u64[GU_64HI] += m13 + m22 + m31; \ _a.u32[GU_32HI] += m30 + m21 + m12 + m03; \ } #endif /* Big-Endian */ #define GU_IMUL128_INPLACE(_a, _b) { \ uint32_t sign = ((_a).u32[GU_32HI] ^ (_b).u32[GU_32HI]) & 0x80000000UL; \ GU_MUL128_INPLACE (_a, _b); \ (_a).u32[GU_32HI] |= sign; \ } #define GU_EQ128(_a, _b) (!memcmp(&_a,&_b,sizeof(_a))) #endif /* __SIZEOF_INT128__ */ /* Not sure how to make it both portable, efficient and still follow the * signature of other byteswap functions at the same time. * So this one does inplace conversion. */ #ifdef __cplusplus extern "C" { #endif static inline void gu_bswap128 (gu_uint128_t* const arg) { uint64_t* x = (uint64_t*)arg; uint64_t tmp = gu_bswap64(x[0]); x[0] = gu_bswap64(x[1]); x[1] = tmp; } #ifdef __cplusplus } #endif #ifdef GU_LITTLE_ENDIAN # define gu_le128(x) {} # define gu_be128(x) gu_bswap128(x) #else # define gu_le128(x) gu_bswap128(x) # define gu_be128(x) {} #endif /* GU_LITTLE_ENDIAN */ #define htog128(x) gu_le128(x) #define gtoh128(x) htog128(x) #endif /* _gu_int128_h_ */ galera-4-26.4.25/galerautils/src/gu_exception.hpp000644 000164 177776 00000002373 15107057155 023026 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2015 Codership Oy * */ #ifndef __GU_EXCEPTION__ #define __GU_EXCEPTION__ #include #include #include "gu_errno.h" namespace gu { /*! Some utility exceptions to indicate special conditions. 
*/ class NotSet {}; class NotFound {}; class Exception: public std::exception { public: Exception (const std::string& msg, int err) : msg_(msg), err_(err) {} Exception (const Exception& e) : msg_(e.msg_), err_(e.err_) {} virtual ~Exception () throw() {} const char* what () const throw() { return msg_.c_str(); } int get_errno () const { return err_; } void trace (const char* file, const char* func, int line) const; private: mutable std::string msg_; int err_; }; } /* to mark a place where exception was caught */ #define GU_TRACE(_exception_) _exception_.trace(__FILE__, __FUNCTION__, __LINE__) #ifndef NDEBUG /* enabled together with assert() */ #define gu_trace(_expr_) \ try { _expr_; } catch (gu::Exception& e) { GU_TRACE(e); throw; } #else #define gu_trace(_expr_) _expr_ #endif // NDEBUG #endif // __GU_EXCEPTION__ galera-4-26.4.25/galerautils/src/gu_resolver.hpp000644 000164 177776 00000017137 15107057155 022675 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2012 Codership Oy * * $Id$ */ /*! * @file gu_resolver.hpp Simple resolver utility */ #ifndef __GU_RESOLVER_HPP__ #define __GU_RESOLVER_HPP__ #include "gu_throw.hpp" #include #include #include #include #include // Forward declarations namespace gu { class URI; } // namespace gu // Declarations namespace gu { namespace net { /*! * @class Sockaddr * * @brief Class encapsulating struct sockaddr. * * Class encapsulating struct sockaddr and providing * simple interface to access sockaddr fields. */ class Sockaddr; /*! * @class IMReq * * @brief Class encapsulating imreq structs. */ class MReq; /*! * @class Addrinfo * * @brief Class encapsulating struct addrinfo. * * Class encapsulating struct addrinfo and providing interface * to access addrinfo fields. */ class Addrinfo; /*! 
* Resolve address given in @uri * * @return Addrinfo object representing address * * @throw gu::Exception in case of failure */ Addrinfo resolve(const gu::URI& uri); } // namespace net } // namespace gu class gu::net::Sockaddr { public: /*! * Default constuctor. * * @param sa Pointer to sockaddr struct * @param sa_len Length of sockaddr struct */ Sockaddr(const sockaddr* sa, socklen_t sa_len); /*! * Copy constructor. * * @param sa Reference to Sockaddr */ Sockaddr(const Sockaddr& sa); /*! * Destructor */ ~Sockaddr(); /*! * Get address family. * * @return Address family */ sa_family_t get_family() const { return sa_->sa_family; } /*! * Get port in network byte order. This is applicable only * for AF_INET, AF_INET6. * * @return Port in network byte order */ unsigned short get_port() const { switch(sa_->sa_family) { case AF_INET: return reinterpret_cast(sa_)->sin_port; case AF_INET6: return reinterpret_cast(sa_)->sin6_port; default: gu_throw_fatal; } } /*! * Get pointer to address. Return value is pointer to void, * user must do casting by himself. * * @todo: Figure out how this could be done in type safe way. * * @return Void pointer to address element. */ const void* get_addr() const { switch(sa_->sa_family) { case AF_INET: return &reinterpret_cast(sa_)->sin_addr; case AF_INET6: return &reinterpret_cast(sa_)->sin6_addr; default: gu_throw_fatal << "invalid address family: " << sa_->sa_family; } } socklen_t get_addr_len() const { switch(sa_->sa_family) { case AF_INET: return sizeof(reinterpret_cast(sa_)->sin_addr); case AF_INET6: return sizeof(reinterpret_cast(sa_)->sin6_addr); default: gu_throw_fatal; } } /*! * Get non-const reference to sockaddr struct. * * @return Non-const reference to sockaddr struct. */ sockaddr& get_sockaddr() { return *sa_; } /*! * Get const reference to sockaddr struct. * * @return Const reference to sockaddr struct. */ const sockaddr& get_sockaddr() const { return *sa_; } /*! * Get length of sockaddr struct. 
* * @return Length of sockaddr struct */ socklen_t get_sockaddr_len() const { return sa_len_; } bool is_multicast() const; bool is_broadcast() const; bool is_anyaddr() const; bool is_linklocal() const; static Sockaddr get_anyaddr(const Sockaddr& sa) { Sockaddr ret(sa); switch(ret.sa_->sa_family) { case AF_INET: reinterpret_cast(ret.sa_)->sin_addr.s_addr = 0; break; case AF_INET6: memset(&reinterpret_cast(ret.sa_)->sin6_addr, 0, sizeof(struct in6_addr)); break; default: gu_throw_fatal << "invalid address family: " << ret.sa_->sa_family; } return ret; } uint32_t get_scope_id() const { switch(sa_->sa_family) { case AF_INET6: return reinterpret_cast(sa_)->sin6_scope_id; default: assert(0); return 0; } } Sockaddr& operator=(const Sockaddr& sa) { memcpy(sa_, sa.sa_, sa_len_); return *this; } private: sockaddr* sa_; socklen_t sa_len_; }; class gu::net::MReq { public: MReq(const Sockaddr& mcast_addr, const Sockaddr& if_addr); ~MReq(); const void* get_mreq() const { return mreq_; } socklen_t get_mreq_len() const { return mreq_len_; } int get_ipproto() const { return ipproto_; } int get_add_membership_opt() const { return add_membership_opt_; } int get_drop_membership_opt() const { return drop_membership_opt_; } int get_multicast_if_opt() const { return multicast_if_opt_; } int get_multicast_loop_opt() const { return multicast_loop_opt_; } int get_multicast_ttl_opt() const { return multicast_ttl_opt_; } const void* get_multicast_if_value() const; int get_multicast_if_value_size() const; private: MReq(const MReq&); void operator=(const MReq&); void* mreq_; socklen_t mreq_len_; int ipproto_; int add_membership_opt_; int drop_membership_opt_; int multicast_if_opt_; int multicast_loop_opt_; int multicast_ttl_opt_; }; class gu::net::Addrinfo { public: /*! * Default constructor. * * @param ai Const reference to addrinfo struct */ Addrinfo(const addrinfo& ai); /*! * Copy costructor. * * @param ai Const reference to Addrinfo object to copy */ Addrinfo(const Addrinfo& ai); /*! 
* Copy constructor that replaces @ai sockaddr struct. * * @param ai Const reference to Addrinfo object to copy * @param sa Const reference to Sockaddr struct that replaces * @ai sockaddr data */ Addrinfo(const Addrinfo& ai, const Sockaddr& sa); /*! * Destructor. */ ~Addrinfo(); /*! * Get address family, AF_INET, AF_INET6 etc. * * @return Address family */ int get_family() const { return ai_.ai_family; } /*! * Get socket type, SOCK_STREAM, SOCK_DGRAM etc * * @return Socket type */ int get_socktype() const { return ai_.ai_socktype; } /*! * Get protocol. * * @return Protocol */ int get_protocol() const { return ai_.ai_protocol; } /*! * Get length of associated sockaddr struct * * @return Length of associated sockaddr struct */ socklen_t get_addrlen() const { return ai_.ai_addrlen; } /*! * Get associated Sockaddr object. * * @return Associated Sockaddr object */ Sockaddr get_addr() const { return Sockaddr(ai_.ai_addr, ai_.ai_addrlen); } /*! * Get string representation of the addrinfo. * * @return String representation of the addrinfo */ std::string to_string() const; private: addrinfo ai_; }; #endif /* __GU_RESOLVER_HPP__ */ galera-4-26.4.25/galerautils/src/gu_regex.cpp000644 000164 177776 00000002252 15107057155 022131 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2009 Codership Oy /** * @file Regular expressions parser based on POSIX regex functions in * * $Id$ */ #include "gu_utils.hpp" #include "gu_regex.hpp" namespace gu { using std::string; using std::vector; string RegEx::strerror (int rc) const { char buf[128]; regerror(rc, ®ex, buf, sizeof(buf)); return string (buf); } static inline RegEx::Match regmatch2Match (const string& str, const regmatch_t& rm) { if (rm.rm_so == -1) return RegEx::Match(); return RegEx::Match (str.substr(rm.rm_so, rm.rm_eo - rm.rm_so)); } vector RegEx::match (const string& str, size_t num) const { vector ret; int rc; VLA matches(num); if ((rc = regexec(®ex, str.c_str(), num, &matches, 0))) { gu_throw_error (EINVAL) << "regexec(" 
<< str << "): " << strerror(rc); } for (size_t i = 0; i < num; ++i) { ret.push_back (regmatch2Match (str, matches[i])); } return ret; } } galera-4-26.4.25/galerautils/src/gu_uuid.h000644 000164 177776 00000006425 15107057155 021440 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2017 Codership Oy * * $Id$ */ /* * Universally Unique IDentifier. RFC 4122. * Time-based implementation. * */ #ifndef _gu_uuid_h_ #define _gu_uuid_h_ #include "gu_types.h" #include "gu_macros.h" #include "gu_arch.h" // GU_ASSERT_ALIGNMENT() #include #include #ifdef __cplusplus extern "C" { #endif /*! UUID internally is represented as a BE integer which allows using * memcmp() as comparison function and straightforward printing */ #define GU_UUID_LEN 16 typedef wsrep_uuid_t gu_uuid_t; static gu_uuid_t const GU_UUID_NIL = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; /*! length of string representation */ #define GU_UUID_STR_LEN 36 /*! Macros for pretty printing */ #define GU_UUID_FORMAT \ "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" #define GU_UUID_ARGS(uuid) \ (uuid)->data[ 0], (uuid)->data[ 1], (uuid)->data[ 2], (uuid)->data[ 3],\ (uuid)->data[ 4], (uuid)->data[ 5], (uuid)->data[ 6], (uuid)->data[ 7],\ (uuid)->data[ 8], (uuid)->data[ 9], (uuid)->data[10], (uuid)->data[11],\ (uuid)->data[12], (uuid)->data[13], (uuid)->data[14], (uuid)->data[15] /* this is used for scanf, variables are by reference */ #define GU_UUID_FORMAT_SCANF \ "%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" #define GU_UUID_ARGS_SCANF(uuid) \ &(uuid)->data[ 0], &(uuid)->data[ 1], &(uuid)->data[ 2], &(uuid)->data[ 3],\ &(uuid)->data[ 4], &(uuid)->data[ 5], &(uuid)->data[ 6], &(uuid)->data[ 7],\ &(uuid)->data[ 8], &(uuid)->data[ 9], &(uuid)->data[10], &(uuid)->data[11],\ &(uuid)->data[12], &(uuid)->data[13], &(uuid)->data[14], &(uuid)->data[15] /*! * Generates new UUID. 
* If node is NULL, will generate random (if /dev/urand is present) or * pseudorandom data instead. * @param uuid * pointer to uuid_t * @param node * some unique data that goes in place of "node" field in the UUID * @param node_len * length of the node buffer */ extern void gu_uuid_generate (gu_uuid_t* uuid, const void* node, size_t node_len); /*! * Compare two UUIDs according to RFC * @return -1, 0, 1 if left is respectively less, equal or greater than right */ extern int gu_uuid_compare (const gu_uuid_t* left, const gu_uuid_t* right); /*! * Compare ages of two UUIDs * @return -1, 0, 1 if left is respectively younger, equal or older than right */ extern int gu_uuid_older (const gu_uuid_t* left, const gu_uuid_t* right); /*! * Print UUID into buffer * @return Number of bytes printed (not including trailing '\0') or -1 on error. */ extern ssize_t gu_uuid_print(const gu_uuid_t* uuid, char* buf, size_t buflen); /*! * Scan UUID from buffer * @return Number of bytes read (should match to sizeof(uuid)) or -1 on error */ extern ssize_t gu_uuid_scan(const char* buf, size_t buflen, gu_uuid_t* uuid); /*! 
* Copy UUID from to as ::memcpy() seems to be considerably faster than the * default assignment operator for structs */ GU_FORCE_INLINE void gu_uuid_copy(gu_uuid_t* const to, const gu_uuid_t* const from) { GU_ASSERT_ALIGNMENT(*to); GU_ASSERT_ALIGNMENT(*from); memcpy(to, from, sizeof(gu_uuid_t)); } #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _gu_uuid_h_ */ galera-4-26.4.25/galerautils/src/gu_histogram.hpp000644 000164 177776 00000001076 15107057155 023024 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef _gu_histogram_hpp_ #define _gu_histogram_hpp_ #include #include namespace gu { class Histogram { public: Histogram(const std::string&); void insert(const double); void clear(); friend std::ostream& operator<<(std::ostream&, const Histogram&); std::string to_string() const; private: std::map cnt_; }; std::ostream& operator<<(std::ostream&, const Histogram&); } #endif // _gu_histogram_hpp_ galera-4-26.4.25/galerautils/src/gu_hash.h000644 000164 177776 00000010073 15107057155 021407 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2012 Codership Oy /** * @file Defines 3 families of standard Galera hash methods * * 1) gu_hash - a general use universal hash: 128, 64 and 32-bit variants. * * 2) gu_fast_hash - optimized for 64-bit Intel CPUs, limited to whole message * only, also comes in 128, 64 and 32-bit flavors. * 3) gu_table_hash - possibly even faster, platform-optimized, globally * inconsistent hash functions to be used only in local hash * tables. Only size_t variants defined. * * 128-bit result is returned through void* parameter as a byte array in * canonical order. * 64/32-bit results are returned as uint64_t/uint32_t integers and thus in host * byte order (require conversion to network/Galera byte order for serialization). 
* * $Id$ */ #ifndef _gu_hash_h_ #define _gu_hash_h_ #ifdef __cplusplus extern "C" { #endif #include "gu_fnv.h" #include "gu_mmh3.h" #include "gu_spooky.h" /* * General purpose globally consistent _fast_ hash, if in doubt use that. */ /* This is to hash multipart message */ #define gu_hash_t gu_mmh128_ctx_t #define gu_hash_init(_hash) gu_mmh128_init(_hash) #define gu_hash_append(_hash, _msg, _len) gu_mmh128_append(_hash, _msg, _len) #define gu_hash_get128(_hash, _res) gu_mmh128_get(_hash, _res) #define gu_hash_get64(_hash) gu_mmh128_get64(_hash) #define gu_hash_get32(_hash) gu_mmh128_get32(_hash) /* This is to hash a whole message in one go */ #define gu_hash128(_msg, _len, _res) gu_mmh128(_msg, _len, _res) #define gu_hash64(_msg, _len) gu_mmh128_64(_msg, _len) #define gu_hash32(_msg, _len) gu_mmh128_32(_msg, _len) /* * Hash optimized for speed, can't do multipart messages, but should still * be usable as global identifier */ #define GU_SHORT64_LIMIT 16 #define GU_MEDIUM64_LIMIT 512 static GU_INLINE void gu_fast_hash128 (const void* const msg, size_t const len, void* const res) { if (len < GU_MEDIUM64_LIMIT) { gu_mmh128 (msg, len, res); } else { gu_spooky128 (msg, len, res); } } static GU_FORCE_INLINE uint64_t gu_fast_hash64_short (const void* const msg, size_t const len) { uint64_t res = GU_FNV64_SEED; gu_fnv64a_internal (msg, len, &res); /* mix to improve avalanche effect */ res *= GU_ROTL64(res, 56); return res ^ GU_ROTL64(res, 43); } #define gu_fast_hash64_medium gu_mmh128_64 #define gu_fast_hash64_long gu_spooky64 static GU_INLINE uint64_t gu_fast_hash64 (const void* const msg, size_t const len) { if (len < GU_SHORT64_LIMIT) { return gu_fast_hash64_short (msg, len); } else if (len < GU_MEDIUM64_LIMIT) { return gu_fast_hash64_medium (msg, len); } else { return gu_fast_hash64_long (msg, len); } } #define gu_fast_hash32_short gu_mmh32 #define gu_fast_hash32_medium gu_mmh128_32 #define gu_fast_hash32_long gu_spooky32 #define GU_SHORT32_LIMIT 32 #define 
GU_MEDIUM32_LIMIT 512 static GU_INLINE uint32_t gu_fast_hash32 (const void* const msg, size_t const len) { if (len < GU_SHORT32_LIMIT) { return gu_fast_hash32_short (msg, len); } else if (len < GU_MEDIUM32_LIMIT) { return gu_fast_hash32_medium (msg, len); } else { return gu_fast_hash32_long (msg, len); } } /* * Platform-optimized hashes only for local hash tables, don't produce globally * consistent results. No 128-bit version for obvious reasons. * * Resulting gu_table_hash() will be the fastest hash function returning size_t */ #if GU_WORDSIZE == 64 #define gu_table_hash gu_fast_hash64 /* size_t is normally 64-bit here */ #elif GU_WORDSIZE == 32 /* on 32-bit platform MurmurHash32 is only insignificantly slower than FNV32a * on messages < 10 bytes but produces far better hash. */ #define gu_table_hash gu_mmh32 /* size_t is normally 32-bit here */ #else /* GU_WORDSIZE neither 64 nor 32 bits */ # error Unsupported wordsize! #endif #ifdef __cplusplus } #endif #endif /* _gu_hash_h_ */ galera-4-26.4.25/galerautils/src/gu_deqmap.hpp000644 000164 177776 00000034436 15107057155 022304 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2020 Codership Oy /** * @file A wrapper over std::deque to emulate continuous integer sequence map. * * Holes are tolerated, but take the same amount of memory as elements. * For that purpose target class must have a default value which is treated as * null. As a result, removing N elements does not always reduce the size by N. * * Insert at iterator behavior also had to be changed from that of std::deque: * to be consistent insert happens exactly TO the element pointed by iterator * so it mostly works as "update" and only adds new elements at the end() * * The implementation is optimized towards elements mostly added at the back and * removed from the front. 
*/ #ifndef GU_DEQMAP_HPP #define GU_DEQMAP_HPP #include "gu_exception.hpp" // NotFound #include #include // std::pair<> #include // bidirectional_iterator_tag #include // std::invalid_argument #include #if __cplusplus >= 201103L #include #endif #ifdef GU_DEQMAP_CONSISTENCY_CHECKS #include #include #define GU_DEQMAP_ASSERT_CONSISTENCY assert_consistency(__func__, __LINE__) #else #define GU_DEQMAP_ASSERT_CONSISTENCY #endif /* GU_DEQMAP_CONSISTENCY_CHECKS */ namespace gu { template > class DeqMap { typedef std::deque base_type; public: typedef Key index_type; #if __cplusplus >= 201103L typedef typename std::make_signed::type difference_type; #else typedef long long difference_type; #endif typedef typename base_type::size_type size_type; typedef typename base_type::value_type value_type; typedef typename base_type::pointer pointer; typedef typename base_type::const_pointer const_pointer; typedef typename base_type::reference reference; typedef typename base_type::const_reference const_reference; typedef typename base_type::allocator_type allocator_type; typedef typename base_type::iterator iterator; typedef typename base_type::const_iterator const_iterator; typedef typename base_type::reverse_iterator reverse_iterator; typedef typename base_type::const_reverse_iterator const_reverse_iterator; static value_type null_value() { return value_type(); } /** A test for an unset element (hole) */ static bool not_set(const_reference val) { return val == null_value(); } /** * @param begin initial index value for the map. It is required for * push_back(value_type&) and push_front(value_type&) * to be meaningful operations. 
*/ explicit DeqMap(index_type begin, const allocator_type& allocator = allocator_type()) : base_ (allocator), begin_ (begin), end_ (begin_) { GU_DEQMAP_ASSERT_CONSISTENCY; } ~DeqMap() { GU_DEQMAP_ASSERT_CONSISTENCY; }; /** total number of elements allocated (not all of them set) */ size_type size() const { return base_.size(); } bool empty() const { return base_.empty(); } /** * @param begin initial index value for the map. See constructor. */ void clear(index_type begin) { GU_DEQMAP_ASSERT_CONSISTENCY; base_.clear(); begin_ = begin; end_ = begin_; GU_DEQMAP_ASSERT_CONSISTENCY; } index_type index_begin() const { return begin_; } index_type index_end() const { return end_; } index_type index_front() const { return index_begin(); } index_type index_back() const { return index_end() - 1; } iterator begin() { return base_.begin(); } iterator end() { return base_.end(); } const_iterator begin() const { return base_.begin(); } const_iterator end() const { return base_.end(); } reverse_iterator rbegin() { return base_.rbegin(); } reverse_iterator rend() { return base_.rend(); } const_reverse_iterator rbegin() const { return base_.rbegin(); } const_reverse_iterator rend() const { return base_.rend(); } const_reference front() const { return base_.front(); } const_reference back() const { return base_.back(); } const_reference operator[] (index_type i) const { return base_[i - begin_]; } const_reference at(index_type i) const { if (begin_ <= i && i < end_) { const_reference v(operator[](i)); if (!not_set(v)) return v; } throw NotFound(); } iterator find(index_type i) { return find_tmpl(*this, i); } const_iterator find(index_type i) const { return find_tmpl(*this, i); } /* pop_front() and pop_back() are the fastest element removal operations * - so set them as base for the rest. 
*/ void pop_front() { do { base_.pop_front(); ++begin_; } while (!empty() && not_set(front())); // trim front GU_DEQMAP_ASSERT_CONSISTENCY; } void pop_back() { do { base_.pop_back(); --end_; } while (!empty() && not_set(back())); // trim back GU_DEQMAP_ASSERT_CONSISTENCY; } iterator erase(iterator position) { /* Invalid ranges for std::deque::erase() produce undefined behavior * so we are not checking for empty container here. */ GU_DEQMAP_ASSERT_CONSISTENCY; if (begin() == position) { pop_front(); return begin(); } else if (--end() == position) { pop_back(); return end(); } else /* don't remove elements from the middle, just unset them */ { return unset(position); } } iterator erase(iterator first, iterator last) { GU_DEQMAP_ASSERT_CONSISTENCY; // compute before iterators get invalidated by erase() size_type const diff(last - first); if (begin() == first) { base_.erase(first, last); begin_ += diff; if (!empty() && not_set(front())) // trim front { pop_front(); } GU_DEQMAP_ASSERT_CONSISTENCY; return begin(); } else if (base_.end() == last) { base_.erase(first, last); end_ -= diff; if (!empty() && not_set(back())) // trim back { pop_back(); } GU_DEQMAP_ASSERT_CONSISTENCY; return end(); } else /* don't remove elements from the middle, just unset them */ { while (first < last) first = unset(first); GU_DEQMAP_ASSERT_CONSISTENCY; return first; } } void erase(index_type const idx) { if (idx == begin_) { pop_front(); } else if (idx == end_ - 1) { pop_back(); } else { base_[idx - begin_] = null_value(); } } /* push_front() and push_back() are the fastest element insertion operations * - so set them as base for the rest. 
*/ void push_front(const value_type& val) { if (!(null_value() == val)) { push_front_unchecked(val); } else { throw_null_value_exception(__func__, val, index_begin() - 1); } } void push_back (const value_type& val) { if (!(null_value() == val)) { push_back_unchecked(val); } else { throw_null_value_exception(__func__, val, index_end()); } } iterator insert(iterator position, const value_type& val) { GU_DEQMAP_ASSERT_CONSISTENCY; if (null_value() == val) { throw_null_value_exception(__func__, val, index(position)); } if (end() == position) { push_back_unchecked(val); } else /* don't insert elements in the middle, just assign them */ { *position = val; } GU_DEQMAP_ASSERT_CONSISTENCY; return position; } void insert(iterator position, size_type n, const value_type& val) { GU_DEQMAP_ASSERT_CONSISTENCY; if (null_value() == val) { throw_null_value_exception(__func__, val, index(position)); } while (position != end() && n) { position = set(position, val); --n; } if (n) { end_ += n; base_.insert(position, n, val); } GU_DEQMAP_ASSERT_CONSISTENCY; } void insert(index_type const i, const value_type& val) { GU_DEQMAP_ASSERT_CONSISTENCY; if (null_value() == val) { throw_null_value_exception(__func__, val, i); } if (begin_ != end_) { if (i >= end_) { if (i == end_) { push_back_unchecked(val); } else { size_type const off(i - end_ + 1); base_.insert(end(), off, null_value()); end_ += off; base_.back() = val; } } else if (i < begin_) { if (i + 1 == begin_) { push_front_unchecked(val); } else { size_type const off(begin_ - i); base_.insert(begin(), off, null_value()); begin_ = i; base_.front() = val; } } else { base_[i - begin_] = val; } } else { begin_ = end_ = i; push_back_unchecked(val); } GU_DEQMAP_ASSERT_CONSISTENCY; } index_type index(const_iterator it) const { GU_DEQMAP_ASSERT_CONSISTENCY; return (it - base_.begin()) + begin_; } index_type index(const_reverse_iterator it) const { GU_DEQMAP_ASSERT_CONSISTENCY; return end_ - (it - base_.rbegin()) - 1; } /** * This is a port of 
std::map::upper_bound(): it returns the index of the * first *set* element in container that is supposed to go after i. Unset * elements are treated as absent. */ index_type upper_bound(index_type i) const { GU_DEQMAP_ASSERT_CONSISTENCY; if (i >= end_) { return end_; } if (i >= begin_) { do { ++i; } while (i < end_ && not_set(operator[](i))); return i; } return begin_; } void print(std::ostream& os) const { os << "gu::DeqMap(size: " << size() << ", begin: " << +index_begin() << ", end: " << +index_end(); os << ", front: "; size() ? os << +front() : os << "n/a"; os << ", back: "; size() ? os << +back() : os << "n/a"; os << ')'; } private: base_type base_; index_type begin_; index_type end_; iterator& unset(iterator& it) { *it = null_value(); return ++it; } iterator& set(iterator& it, const value_type& val) { *it = val; return ++it; } void push_front_unchecked(const value_type& val) { base_.push_front(val); --begin_; } void push_back_unchecked(const value_type& val) { base_.push_back(val); ++end_; } void throw_null_value_exception(const char* const func_name, const value_type& val, const index_type& pos) { std::ostringstream what; what << "Null value '" << val << "' with index " << pos << " was passed to " << func_name; throw std::invalid_argument(what.str()); } /* Template to avoid code duplication in find() methods */ template static Iter find_tmpl (This& t, index_type i) { #ifdef GU_DEQMAP_CONSISTENCY_CHECKS t.GU_DEQMAP_ASSERT_CONSISTENCY; #endif if (i >= t.begin_ && i < t.end_) return t.begin() += (i - t.begin_); else return t.end(); } #ifdef GU_DEQMAP_CONSISTENCY_CHECKS void assert_consistency(const char* const func_name, int const line) const { bool ok(true); int check(1); ok = ok && (begin() + size() == end()); check += ok; ok = ok && (index_begin() + index_type(size()) == index_end()); check += ok; if (!empty()) { ok = ok && (!(front() == null_value())); check += ok; ok = ok && (!(back() == null_value())); check += ok; ok = ok && (operator[](index_begin()) == 
front()); check += ok; ok = ok && (operator[](index_end() - 1) == back()); check += ok; ok = ok && (*begin() == front()); check += ok; ok = ok && (*(--end()) == back()); check += ok; if (size() == 1) { ok = ok && (front() == back()); check += ok; ok = ok && (*begin() == *rbegin()); check += ok; } } else { ok = ok && (begin() == end()); check += ok; } if (!ok) { std::cerr << "gu::DeqMap consistency check " << check << " failed at " << func_name << "():" << line << " map: "; print(std::cerr); std::cerr << std::endl; abort(); } } #endif /* GU_DEQMAP_CONSISTENCY_CHECKS */ }; /* class DeqMap */ template static inline std::ostream& operator<<(std::ostream& os, const DeqMap& m) { m.print(os); return os; } } /* namespace gu */ #endif /* GU_DEQMAP_HPP */ galera-4-26.4.25/galerautils/src/gu_config.cpp000644 000164 177776 00000030332 15107057155 022264 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2014 Codership Oy /** * @file * Configuration management implementation * * $Id$ */ #include "gu_config.h" #include "gu_config.hpp" #include "gu_logger.hpp" #include "gu_assert.hpp" const char gu::Config::PARAM_SEP = ';'; // parameter separator const char gu::Config::KEY_VALUE_SEP = '='; // key-value separator const char gu::Config::ESCAPE = '\\'; // escape symbol void gu::Config::parse ( std::vector >& params_vector, const std::string& param_list) { assert(params_vector.empty()); // we probably want a clean list if (param_list.empty()) return; std::vector pv = gu::tokenize (param_list, PARAM_SEP, ESCAPE); for (size_t i = 0; i < pv.size(); ++i) { std::vector kvv = gu::tokenize (pv[i], KEY_VALUE_SEP, ESCAPE, true); assert(kvv.size() > 0); gu::trim(kvv[0]); const std::string& key = kvv[0]; if (!key.empty()) { if (kvv.size() == 1) { gu_throw_error(EINVAL) <<"Key without value: '" << key <<"' at position '" << i << "' in parameter list."; } if (kvv.size() > 2) { gu_throw_error(EINVAL) <<"More than one value for key '" << key <<"' at '" << pv[i] << "' in parameter list."; } 
gu::trim(kvv[1]); std::string& value = kvv[1]; params_vector.push_back(std::make_pair(key, value)); } else if (kvv.size() > 1) { gu_throw_error(EINVAL) << "Empty key at '" << pv[i] << "' in parameter list."; } } } void gu::Config::parse (const std::string& param_list) { if (param_list.empty()) return; std::vector > pv; parse (pv, param_list); bool not_found(false); for (size_t i = 0; i < pv.size(); ++i) { const std::string& key (pv[i].first); const std::string& value(pv[i].second); try { set(key, value); } catch (NotFound& e) { log_error << "Unrecognized parameter '" << key << '\''; /* Throw later so that all invalid parameters get logged.*/ not_found = true; } log_debug << "Set parameter '" << key << "' = '" << value << "'"; } if (not_found) throw gu::NotFound(); } gu::Config::Config() : params_() {} std::function gu::Config::deprecation_check_func_ = check_deprecated; void gu::Config::set_longlong (const std::string& key, long long val) { const char* num_mod = ""; /* Shift preserves sign! 
*/ if (val != 0) { if (!(val & ((1LL << 40) - 1))) { val >>= 40; num_mod = "T"; } else if (!(val & ((1 << 30) - 1))) { val >>= 30; num_mod = "G"; } else if (!(val & ((1 << 20) - 1))) { val >>= 20; num_mod = "M"; } else if (!(val & ((1 << 10) - 1))) { val >>= 10; num_mod = "K"; } } std::ostringstream ost; ost << val << num_mod; set (key, ost.str()); } void gu::Config::key_check (const std::string& key) { if (key.size() == 0) { gu_throw_error(EINVAL) << "Empty key."; } } void gu::Config::check_conversion (const char* str, const char* endptr, const char* type, bool range_error) { if (endptr == str || endptr[0] != '\0' || range_error) { gu_throw_error(EINVAL) << "Invalid value '" << str << "' for " << type << " type."; } } void gu::Config::enable_deprecation_check() { deprecation_check_func_ = check_deprecated; } void gu::Config::disable_deprecation_check() { deprecation_check_func_ = nullptr; } void gu::Config::check_deprecated(const std::string& key, const Parameter& param) { if (param.is_deprecated()) { log_warn << "Parameter '" << key << "' is deprecated and will be removed in future versions"; } } char gu::Config::overflow_char(long long ret) { if (ret >= CHAR_MIN && ret <= CHAR_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (char)."; } short gu::Config::overflow_short(long long ret) { if (ret >= SHRT_MIN && ret <= SHRT_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (short)."; } int gu::Config::overflow_int(long long ret) { if (ret >= INT_MIN && ret <= INT_MAX) return ret; gu_throw_error(EOVERFLOW) << "Value " << ret << " too large for requested type (int)."; } void gu::Config::print (std::ostream& os, bool const notset) const { struct _print_param { void operator() (std::ostream& os, bool const notset, param_map_t::const_iterator& pi) { const Parameter& p(pi->second); if (p.is_set() || notset) { os << pi->first << " = " << p.value() << "; "; } } } print_param; for 
(param_map_t::const_iterator pi(params_.begin()); pi != params_.end(); ++pi) { print_param(os, notset, pi); } } std::ostream& gu::operator<<(std::ostream& ost, const gu::Config& c) { c.print(ost); return ost; } gu_config_t* gu_config_create (void) { try { return (reinterpret_cast(new gu::Config())); } catch (gu::Exception& e) { log_error << "Failed to create configuration object: " << e.what(); return 0; } } void gu_config_destroy (gu_config_t* cnf) { if (cnf) { gu::Config* conf = reinterpret_cast(cnf); delete conf; } else { log_error << "Null configuration object in " << __FUNCTION__; assert (0); } } static int config_check_set_args (gu_config_t* cnf, const char* key, const char* func) { if (cnf && key && key[0] != '\0') return 0; if (!cnf) { log_fatal << "Null configuration object in " << func; } if (!key) { log_fatal << "Null key in " << func; } else if (key[0] == '\0') { log_fatal << "Empty key in " << func; } assert (0); return -EINVAL; } static int config_check_get_args (gu_config_t* cnf, const char* key, const void* val_ptr, const char* func) { if (cnf && key && key[0] != '\0' && val_ptr) return 0; if (!cnf) { log_error << "Null configuration object in " << func; } if (!key) { log_error << "Null key in " << func; } else if (key[0] == '\0') { log_error << "Empty key in " << func; } if (!val_ptr) { log_error << "Null value pointer in " << func; } assert (0); return -EINVAL; } bool gu_config_has (gu_config_t* cnf, const char* key) { if (config_check_set_args (cnf, key, __FUNCTION__)) return false; gu::Config* conf = reinterpret_cast(cnf); return (conf->has (key)); } bool gu_config_is_set (gu_config_t* cnf, const char* key) { if (config_check_set_args (cnf, key, __FUNCTION__)) return false; gu::Config* conf = reinterpret_cast(cnf); return (conf->is_set (key)); } int gu_config_add (gu_config_t* cnf, const char* key, const char* const val, int flags) { if (config_check_set_args (cnf, key, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try 
{ if (val != NULL) conf->add (key, val, flags); else conf->add (key, flags); return 0; } catch (std::exception& e) { log_error << "Error adding parameter '" << key << "': " << e.what(); return -1; } catch (...) { log_error << "Unknown exception adding parameter '" << key << "'"; return -1; } } int gu_config_get_string (gu_config_t* cnf, const char* key, const char** val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key).c_str(); return 0; } catch (gu::NotFound&) { return 1; } } int gu_config_get_int64 (gu_config_t* cnf, const char* key, int64_t* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_double (gu_config_t* cnf, const char* key, double* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_ptr (gu_config_t* cnf, const char* key, void** val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return -EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } int gu_config_get_bool (gu_config_t* cnf, const char* key, bool* val) { if (config_check_get_args (cnf, key, val, __FUNCTION__)) return 
-EINVAL; gu::Config* conf = reinterpret_cast(cnf); try { *val = conf->get(key); return 0; } catch (gu::NotFound&) { return 1; } catch (gu::NotSet&) { return 1; } catch (gu::Exception& e) { log_error << "Failed to parse parameter '" << key << "': " << e.what(); return -e.get_errno(); } } #include void gu_config_set_string (gu_config_t* cnf, const char* key, const char* val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); assert (cnf); gu::Config* conf = reinterpret_cast(cnf); conf->set (key, val); } void gu_config_set_int64 (gu_config_t* cnf, const char* key, int64_t val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set (key, val); } void gu_config_set_double (gu_config_t* cnf, const char* key, double val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } void gu_config_set_ptr (gu_config_t* cnf, const char* key, const void* val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } void gu_config_set_bool (gu_config_t* cnf, const char* key, bool val) { if (config_check_set_args (cnf, key, __FUNCTION__)) abort(); gu::Config* conf = reinterpret_cast(cnf); conf->set(key, val); } ssize_t gu_config_print (gu_config_t* cnf, char* buf, ssize_t buf_len) { std::ostringstream os; os << *(reinterpret_cast(cnf)); const std::string& str = os.str(); strncpy (buf, str.c_str(), buf_len - 1); buf[buf_len - 1] = '\0'; return str.length(); } galera-4-26.4.25/galerautils/src/gu_init.c000644 000164 177776 00000000772 15107057155 021427 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013-2016 Codership Oy * * $Id$ */ #include "gu_conf.h" #include "gu_limits.h" #include "gu_abort.h" #include "gu_crc32c.h" void gu_init (gu_log_cb_t log_cb) { gu_conf_set_log_callback (log_cb); /* this is needed in gu::MMap::sync() */ size_t const page_size = 
GU_PAGE_SIZE; if (page_size & (page_size - 1)) { gu_fatal("GU_PAGE_SIZE(%zu) is not a power of 2", GU_PAGE_SIZE); gu_abort(); } gu_crc32c_configure(); } galera-4-26.4.25/galerautils/src/gu_asio_error_category.hpp000644 000164 177776 00000002271 15107057155 025066 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2020 Codership Oy // #ifndef GU_ASIO_ERROR_CATEGORY_HPP #define GU_ASIO_ERROR_CATEGORY_HPP #ifndef GU_ASIO_IMPL #error This header should not be included directly. #endif // GU_ASIO_IMPL #include "asio/error.hpp" namespace gu { class AsioErrorCategory { public: AsioErrorCategory(const asio::error_category& category) : category_(category) { } AsioErrorCategory(const AsioErrorCategory&) = delete; AsioErrorCategory& operator=(const AsioErrorCategory&) = delete; const asio::error_category& native() const { return category_; } bool operator==(const AsioErrorCategory& other) const { return (category_ == other.category_); } bool operator!=(const AsioErrorCategory& other) const { return not (*this == other); } private: const asio::error_category& category_; }; } extern gu::AsioErrorCategory gu_asio_system_category; extern gu::AsioErrorCategory gu_asio_misc_category; #ifdef GALERA_HAVE_SSL extern gu::AsioErrorCategory gu_asio_ssl_category; #endif // GALERA_HAVE_SSL #endif // GU_ASIO_ERROR_CATEGORY_HPP galera-4-26.4.25/galerautils/src/gu_reserved_container.hpp000644 000164 177776 00000017654 15107057155 024721 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013-2020 Codership Oy /*! * ReservedContainer template. It is a wrapper for a container and a reserved * buffer to allocate elements from. * * For more rationale see * http://src.chromium.org/chrome/trunk/src/base/containers/stack_container.h * * It is not called "StackContainer" because it is not only for objects * allocated on the stack. 
* * $Id$ */ #ifndef _GU_RESERVED_CONTAINER_ #define _GU_RESERVED_CONTAINER_ #include "gu_logger.hpp" #include // size_t, ptrdiff_t and NULL #include // malloc() and free() #include #include // placement new and std::bad_alloc // GU_ALIGNOF macro to support GCC 4.4. Should be removed after // support for GCC 4.4 is not needed anymore. #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ == 4) # define GU_ALIGNOF __alignof__ # else # define GU_ALIGNOF alignof # endif // (__GNUC__ == 4 && __GNUC_MINOR__ == 4) #endif // __GNUG__ namespace gu { template class AlignedBuffer { public: T* base_ptr() { return reinterpret_cast (buf_); } const T* base_ptr() const { return reinterpret_cast(buf_); } size_t size() const { return capacity; } private: // The buffer itself. It is not of type T because we don't want the // constructors and destructors to be automatically called. Define a POD // buffer of the right size instead. typename std::aligned_storage::type buf_[capacity]; }; /*! * ReservedAllocator is an allocator for STL containers that can use a * prealocated buffer (supplied at construction time) for initial container * storage allocation. If the number of elements exceeds buffer capacity, it * overflows to heap. * * This does not derive from std::allocator, but implements the whole thing. * * NOTE1: container must support reserve() method. 
* * NOTE2: it won't work with containers that require allocator to have default * constructor, like std::basic_string */ template class ReservedAllocator { public: typedef AlignedBuffer Buffer; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; typedef size_t size_type; // making size_type unsigned int does not seem to reduce footprint typedef ptrdiff_t difference_type; template struct rebind { typedef ReservedAllocator other; }; T* address(T& t) const { return &t; } const T* address(const T& t) const { return &t; } size_type max_size() const { return size_type(-1)/2/sizeof(T); } void construct (T* const p, const T& t) const { new (p) T(t); } void destroy (T* const p) const { p->~T(); } // Storage allocated from this can't be deallocated from other bool operator==(const ReservedAllocator& other) const { return (buffer_ == other.buffer_); } bool operator!=(const ReservedAllocator& other) const { return !(*this == other); } ReservedAllocator(Buffer& buf, size_type n = 0) : buffer_(&buf), used_(n) {} ReservedAllocator(const ReservedAllocator& other) : buffer_(other.buffer_), used_(other.used_) { // log_debug << "Copy ctor\n"; } template ReservedAllocator(const ReservedAllocator&) : buffer_(NULL), used_(reserved) { // log_debug << "Rebinding ctor\n"; } ~ReservedAllocator() {} T* allocate(size_type const n, void* hint = NULL) { if (n == 0) return NULL; if (reserved - used_ >= n /* && buffer_ != NULL */) { assert (buffer_ != NULL); if (diagnostic) { log_info << "Allocating " << n << '/' << (reserved - used_) << " from reserve"; } T* const ret(buffer_->base_ptr() + used_); used_ += n; return ret; } if (n <= max_size()) { if (diagnostic) { log_warn << "Allocating " << n << " from heap"; } void* ret = malloc(n * sizeof(T)); if (NULL != ret) return static_cast(ret); } throw std::bad_alloc(); } void deallocate(T* const p, size_type const n) { if (size_type(p - buffer_->base_ptr()) < reserved) { 
assert (used_ > 0); if (buffer_->base_ptr() + used_ == p + n) { /* last allocated buffer, can shrink */ used_ -= n; } else { /* cannot recycle reserved space in this case */ assert(p + n <= buffer_->base_ptr() + used_); } } else { free(p); } } size_type used() const { return used_; } private: /* even though we initially allocate buffer in ReservedContainer directly * before this, STL containers insist on copying allocators, so we need * a pointer to buffer to be an explicit member (and waste another 8 bytes*/ Buffer* buffer_; size_type used_; ReservedAllocator& operator=(const ReservedAllocator&); }; /* class ReservedAllocator */ /*! * ReservedContainer is a wrapper for * - fixed size nicely aligned buffer * - ReservedAllocator that uses the buffer * - container type that uses allocator * * the point is to have a container allocated on the stack to use stack buffer * for element storage. */ template class ReservedContainer { public: ReservedContainer() : buffer_ (), /* Actual Allocator instance used by container_ should be * copy-constructed from the temporary passed to container ctor. * Copy-construction preserves pointer to buffer, which is not * temporary. This works at least with std::vector */ container_(Allocator(buffer_)) { /* Make the container use most of the buffer by reserving our buffer * size before doing anything else. */ container_.reserve(reserved); } /* * Getters for the actual container. * * Danger: any copies of this made using the copy constructor must have * shorter lifetimes than the source. The copy will share the same allocator * and therefore the same stack buffer as the original. Use std::copy to * copy into a "real" container for longer-lived objects. */ ContainerType& container() { return container_; } const ContainerType& container() const { return container_; } ContainerType& operator()() { return container_; } const ContainerType& operator()() const { return container_; } /* * Support operator-> to get to the container. 
* This allows nicer syntax like: * ReservedContainer<...> foo; * std::sort(foo->begin(), foo->end()); */ ContainerType* operator->() { return &container_; } const ContainerType* operator->() const { return &container_; } /* For testing only */ typedef typename ContainerType::value_type ContainedType; const ContainedType* reserved_buffer() const { return buffer_.base_ptr(); } private: typedef ReservedAllocator Allocator; typedef typename Allocator::Buffer Buffer; Buffer buffer_; ContainerType container_; /* Note that container will use another instance of Allocator, copy * constructed from allocator_, so any changes won't be re*/ ReservedContainer(const ReservedContainer&); ReservedContainer& operator=(const ReservedContainer&); }; /* class ReservedContainer */ } /* namespace gu */ #endif /* _GU_RESERVED_CONTAINER_ */ galera-4-26.4.25/galerautils/src/gu_vlq.cpp000644 000164 177776 00000003263 15107057155 021624 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2013 Codership Oy // //! // @file Variable-length quantity encoding for integers // // Unsigned integers: Implementation uses using unsigned LEB128, // see for example http://en.wikipedia.org/wiki/LEB128. // // Signed integers: TODO // #include "gu_vlq.hpp" namespace gu { /* checks helper for the uleb128_decode() */ void uleb128_decode_checks (const byte_t* buf, size_t buflen, size_t offset, size_t avail_bits) { // Check if trying to read past last byte in buffer without // encountering byte without 0x80 bit set. 
if (offset >= buflen) { gu_throw_error(EINVAL) << "read value is not uleb128 representation, missing " << "terminating byte before end of input"; } assert(avail_bits > 0); if (avail_bits < 7) { // mask to check if the remaining value can be represented // with available bits gu::byte_t mask(~((1 << avail_bits) - 1)); if ((buf[offset] & mask) != 0) { gu_throw_error(EOVERFLOW) << "read value not representable with avail bits: " << avail_bits << " mask: 0x" << std::hex << static_cast(mask) << " buf: 0x" << std::hex << static_cast(buf[offset]) << " excess: 0x" << std::hex << static_cast(mask & buf[offset]); } } } } /* namespace gu */ galera-4-26.4.25/galerautils/src/gu_rset.hpp000644 000164 177776 00000032374 15107057155 022011 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013-2019 Codership Oy */ /*! * @file common RecordSet interface * * Record set is a collection of serialized records of the same type. * * It stores them in an iovec-like collection of buffers before sending * and restores from a single buffer when receiving. * * $Id$ */ #ifndef _GU_RSET_HPP_ #define _GU_RSET_HPP_ #include "gu_vector.hpp" #include "gu_alloc.hpp" #include "gu_digest.hpp" #include "gu_limits.h" // GU_MIN_ALIGNMENT #ifdef GU_RSET_CHECK_SIZE # include "gu_throw.hpp" #endif #include namespace gu { class RecordSet { public: enum Version { EMPTY = 0, VER1, VER2 }; static Version const MAX_VERSION = VER2; static int const VER2_ALIGNMENT = GU_MIN_ALIGNMENT; enum CheckType { CHECK_NONE = 0, CHECK_MMH32, CHECK_MMH64, CHECK_MMH128 }; static int check_size(CheckType ct); /*! return net, payload size of a RecordSet */ size_t size() const { return size_; } /*! return total, padded size of a RecordSet */ size_t serial_size() const { return GU_ALIGN(size_, alignment_); } /*! return number of records in the record set */ int count() const { return count_; } Version version() const { return Version(version_); } CheckType check_type() const { return CheckType(check_type_); } /*! 
return alignment of the records */ int alignment() const { return alignment_; }; typedef gu::Vector GatherVector; protected: ssize_t size_; int count_; private: byte_t version_; byte_t check_type_; byte_t alignment_; protected: /* ctor for RecordSetOut */ RecordSet (Version const version, CheckType const ct); /* ctor for RecordSetIn */ RecordSet () : size_ (0), count_ (0), version_ (EMPTY), check_type_(CHECK_NONE), alignment_ (Version(0)) {} void init (const byte_t* buf, ssize_t size); ~RecordSet() {} }; /*! specialization of Vector::serialize() method */ template<> inline RecordSet::GatherVector::size_type RecordSet::GatherVector::serialize(void* const buf, size_type const buf_size, size_type const offset /* = 0 */) { byte_t* to (static_cast(buf) + offset); byte_t* const end(static_cast(buf) + buf_size); for (size_type i(0); i < size(); ++i) { const gu::Buf& f((*this)[i]); if (to + f.size > end) { gu_throw_fatal << "attempt to write beyond buffer boundary"; } const gu::byte_t* from(static_cast(f.ptr)); to = std::copy(from, from + f.size, to); } return to - static_cast(buf); } #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic push # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic ignored "-Weffc++" #endif /*! class to store records in buffer(s) to send out */ class RecordSetOutBase : public RecordSet { public: typedef Allocator::BaseName BaseName; /*! return number of disjoint pages in the record set */ ssize_t page_count() const { return bufs_->size() + padding_page_needed(); } /*! 
return vector of RecordSet fragments in adjusent order */ ssize_t gather (GatherVector& out); protected: RecordSetOutBase() : RecordSet() {} RecordSetOutBase (byte_t* reserved, size_t reserved_size, const BaseName& base_name, /* basename for on-disk * allocator */ CheckType ct, Version version = MAX_VERSION #ifdef GU_RSET_CHECK_SIZE ,ssize_t max_size = 0x7fffffff #endif ); /* this is to emulate partial specialization of function template through * overloading by parameter */ template struct HasPtr{}; /* variant for classes that don't provide ptr() method and need to be * explicitly serialized to internal storage */ template void process (const R& record, const byte_t*& ptr, bool& new_page, size_t const size, bool, HasPtr) { byte_t* const dst(alloc(size, new_page)); ptr = dst; #ifdef NDEBUG record.serialize_to (dst, size); #else size_t const ssize (record.serialize_to (dst, size)); assert (ssize == size); #endif } /* variant for classes that have ptr() method and can be either serialized * or referenced */ template void process (const R& record, const byte_t*& ptr, bool& new_page, size_t const size, bool const store, HasPtr) { if (store) { process (record, ptr, new_page, size, true, HasPtr()); } else { ptr = record.ptr(); new_page = true; } } template std::pair append_base (const R& record, bool const store = true, bool const new_record = true) { ssize_t const size (record.serial_size()); #ifdef GU_RSET_CHECK_SIZE if (gu_unlikely(size > max_size_ - size_)) gu_throw_error(EMSGSIZE); #endif bool new_page; const byte_t* ptr; process (record, ptr, new_page, size, store, HasPtr()); prev_stored_ = store; // make sure there is at least one record count_ += new_record || (0 == count_); post_append (new_page, ptr, size); size_ += size; return std::pair(ptr, size); } private: #ifdef GU_RSET_CHECK_SIZE ssize_t const max_size_; #endif Allocator alloc_; Hash check_; Vector bufs_; bool prev_stored_; inline bool padding_page_needed() const { return (size_ % alignment()); } inline 
byte_t* alloc(size_t const size, bool& new_page) { byte_t* const ret(alloc_.alloc (size, new_page)); new_page = (new_page || !prev_stored_); return ret; } inline void post_alloc (bool const new_page, const byte_t* const ptr, ssize_t const size) { if (new_page) { Buf b = { ptr, size }; bufs_->push_back (b); } else { bufs_->back().size += size; } } inline void post_append (bool const new_page, const byte_t* const ptr, ssize_t const size) { check_.append (ptr, size); post_alloc (new_page, ptr, size); } int header_size () const; int header_size_max () const; /* Writes the header to the end of provided buffer, returns header * offset from ptr */ ssize_t write_header (byte_t* ptr, ssize_t size); }; /*! This is a small wrapper template for RecordSetOutBase to avoid templating * the whole thing instead of just the two append methods. */ template class RecordSetOut : public RecordSetOutBase { public: typedef RecordSetOutBase::BaseName BaseName; RecordSetOut() : RecordSetOutBase() {} RecordSetOut (byte_t* reserved, size_t reserved_size, const BaseName& base_name, CheckType ct, Version version = MAX_VERSION #ifdef GU_RSET_CHECK_SIZE ,ssize_t max_size = 0x7fffffff #endif ) : RecordSetOutBase (reserved, reserved_size, base_name, ct, version #ifdef GU_RSET_CHECK_SIZE ,max_size #endif ) {} std::pair append (const R& r) { return append_base (r); // return append_base (r); old append_base() method } std::pair append (const void* const src, ssize_t const size, bool const store = true, bool const new_record = true) { assert (src); assert (size); BufWrap bw (src, size); return append_base (bw, store, new_record); // return append_base (src, size, store); - old append_base() method } private: /*! 
a wrapper class to represent ptr and size as a serializable object: * simply defines serial_size(), ptr() and serialize_to() methods */ class BufWrap { const byte_t* const ptr_; size_t const size_; public: BufWrap (const void* const ptr, size_t const size) : ptr_(reinterpret_cast(ptr)), size_(size) {} size_t serial_size() const { return size_; } const byte_t* ptr() const { return ptr_; } size_t serialize_to (byte_t* const dst, size_t) const { ::memcpy (dst, ptr_, size_); return size_; } }; RecordSetOut (const RecordSetOut&); RecordSetOut& operator = (const RecordSetOut&); }; /* class RecordSetOut */ /*! class to recover records from a buffer */ class RecordSetInBase : public RecordSet { public: RecordSetInBase (const byte_t* buf,/* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_now = true); /* checksum now */ /* this is a "delayed constructor", for the object created empty */ void init (const byte_t* buf, /* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_now = true); /* checksum now */ void rewind() const { next_ = begin_; } void checksum() const; // throws if checksum fails uint64_t get_checksum() const; gu::Buf buf() const { gu::Buf ret = { head_, ssize_t(serial_size()) }; return ret; } protected: template void next_base (Buf& n) const { if (gu_likely (next_ < size_)) { size_t const next_size(R::serial_size(head_ + next_, size_ -next_)); /* sanity check */ if (gu_likely (next_ + next_size <= size_t(size_))) { n.ptr = head_ + next_; n.size = next_size; next_ += next_size; return; } throw_error (E_FAULT); } assert (next_ == size_); throw_error (E_PERM); } template R next_base () const { if (gu_likely (next_ < size_)) { R const rec(head_ + next_, size_ - next_); size_t const tmp_size(rec.serial_size()); /* sanity check */ if (gu_likely (next_ + tmp_size <= size_t(size_))) { next_ += tmp_size; return rec; } throw_error (E_FAULT); } assert (next_ == size_); throw_error (E_PERM); } 
private: const byte_t* head_; /* pointer to header */ ssize_t mutable next_; /* offset to next record */ short begin_; /* offset to first record */ /* size_ from parent class is offset past all records */ /* takes total size of the supplied buffer */ void parse_header_v1_2 (size_t size); enum Error { E_PERM, E_FAULT }; GU_NORETURN void throw_error (Error code) const; /* shallow copies here - we're not allocating anything */ RecordSetInBase (const RecordSetInBase& r) : RecordSet (r), head_ (r.head_), next_ (r.next_), begin_ (r.begin_) {} RecordSetInBase& operator= (const RecordSetInBase r); #if 0 { std::swap(head_, r.head_); std::swap(next_, r.next_); std::swap(begin, r.begin_); } #endif }; /* class RecordSetInBase */ /*! This is a small wrapper template for RecordSetInBase to avoid templating * the whole thing instead of just the two next methods. */ template class RecordSetIn : public RecordSetInBase { public: RecordSetIn (const void* buf,/* pointer to the beginning of buffer */ size_t size, /* total size of buffer */ bool check_first = true) /* checksum now */ : RecordSetInBase (reinterpret_cast(buf), size, check_first) {} RecordSetIn () : RecordSetInBase (NULL, 0, false) {} void next (Buf& n) const { next_base (n); } R next () const { return next_base (); } }; /* class RecordSetIn */ #if defined(__GNUG__) # if (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) # pragma GCC diagnostic pop # endif // (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) || (__GNUC__ > 4) #endif } /* namespace gu */ #endif /* _GU_RSET_HPP_ */ galera-4-26.4.25/galerautils/src/gu_types.h000644 000164 177776 00000000763 15107057155 021635 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2013 Codership Oy /** * @file Location of some "standard" types definitions * * $Id$ */ #ifndef _gu_types_h_ #define _gu_types_h_ #include /* intXX_t and friends */ #include /* bool */ #include /* ssize_t */ #include /* ptrdiff_t */ #include /* off_t */ #ifdef __cplusplus extern "C" { #endif typedef 
unsigned char gu_byte_t; #ifdef __cplusplus } #endif #endif /* _gu_types_h_ */ galera-4-26.4.25/galerautils/src/gu_serialize.hpp000644 000164 177776 00000035717 15107057155 023027 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy */ /*! * @file Helper templates for serialization/unserialization. * As we are usually working on little endian platforms, integer * storage order is little-endian - in other words we use "Galera" * order, which is by default little-endian. * * What is going on down there? Templates are good. However we do * not serialize the value of size_t variable into sizeof(size_t) * bytes. We serialize it into a globally consistent, fixed number * of bytes, regardless of the local size of size_t variable. * * Hence templating by the source variable size should not be used. * Instead there are functions/templates that serialize to an explicit * number of bytes. * * @todo Templates are safe to use with integer types only. Adjust them * to work also with classes that have special serialization * routines. * @todo Make buffer serialization functions Buffer class methods. * @todo Alignment issues. 
*/ #ifndef GU_SERIALIZE_HPP #define GU_SERIALIZE_HPP #include "gu_exception.hpp" #include "gu_byteswap.hpp" #include "gu_buffer.hpp" #include "gu_macros.hpp" #include "gu_utils.hpp" #include #include // ::memcpy() namespace gu { template inline size_t serial_size(const T& t) { return t.serial_size(); } template <> inline size_t serial_size(const uint8_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint16_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint32_t& b) { return sizeof(b); } template <> inline size_t serial_size(const uint64_t& b) { return sizeof(b); } class SerializationException : public Exception { public: SerializationException(size_t ret, size_t buflen); }; /* * Non-checking serialization template helpers for cases where buffer size * check is redundant */ template inline size_t serialize_helper(const FROM& f, void* const buf, size_t const offset) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer1); GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer2); GU_COMPILE_ASSERT(sizeof(FROM) <= sizeof(TO), size_differs); TO const tmp(htog(f)); ::memcpy(ptr_offset(buf, offset), &tmp, sizeof(tmp)); return offset + sizeof(tmp); } template inline size_t unserialize_helper(const void* const buf, size_t const offset, TO& t) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer1); GU_COMPILE_ASSERT(std::numeric_limits::is_integer, not_integer2); GU_COMPILE_ASSERT(sizeof(FROM) <= sizeof(TO), size_differs); FROM tmp; ::memcpy(&tmp, ptr_offset(buf, offset), sizeof(tmp)); t = gtoh(tmp); return offset + sizeof(tmp); } /* General serialization templates for numeric types */ template GU_FORCE_INLINE size_t serialize(const FROM& f, void* const buf, size_t const offset) { return serialize_helper(f, buf, offset); } template GU_FORCE_INLINE size_t unserialize(const void* const buf, size_t const offset, TO& t) { return unserialize_helper(buf, offset, t); } /* The following templates force 
explicit size serialization/deserialization * at compile stage */ template GU_FORCE_INLINE size_t serialize1(const T& t, void* const buf, size_t const offset) { return serialize_helper(t, buf, offset); } template GU_FORCE_INLINE size_t unserialize1(const void* const buf, size_t const offset, T& t) { return unserialize_helper(buf, offset, t); } template GU_FORCE_INLINE size_t serialize2(const T& t, void* const buf, size_t const offset) { return serialize_helper(t, buf, offset); } template GU_FORCE_INLINE size_t unserialize2(const void* const buf, size_t const offset, T& t) { return unserialize_helper(buf, offset, t); } template GU_FORCE_INLINE size_t serialize4(const T& t, void* const buf, size_t const offset) { return serialize_helper(t, buf, offset); } template GU_FORCE_INLINE size_t unserialize4(const void* const buf, size_t const offset, T& t) { return unserialize_helper(buf, offset, t); } template GU_FORCE_INLINE size_t serialize8(const T& t, void* const buf, size_t const offset) { return serialize_helper(t, buf, offset); } template GU_FORCE_INLINE size_t unserialize8(const void* const buf, size_t const offset, T& t) { return unserialize_helper(buf, offset, t); } /* * Buffer length checking serialization template helpers */ GU_FORCE_INLINE void check_bounds(size_t need, size_t have) { if (gu_unlikely(need > have)) throw SerializationException(need, have); } template inline size_t serialize_helper(const FROM& f, void* const buf, size_t const buflen, size_t const offset) { size_t const check(offset + sizeof(TO)); gu_trace(check_bounds(check, buflen)); return serialize_helper(f, buf, offset); } template inline size_t unserialize_helper(const void* const buf, size_t const buflen, size_t const offset, TO& t) { size_t const check(offset + sizeof(FROM)); gu_trace(check_bounds(check, buflen)); return unserialize_helper(buf, offset, t); } /* General serialization templates for numeric types */ template GU_FORCE_INLINE size_t serialize(const FROM& f, void* const buf, 
size_t const buflen, size_t const offset) { return serialize_helper(f, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize(const void* const buf, size_t const buflen, size_t const offset, TO& t) { return unserialize_helper(buf, buflen, offset, t); } /* The following templates force explicit size serialization/deserialization * at compile stage */ template GU_FORCE_INLINE size_t serialize1(const T& t, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize1(const void* const buf, size_t const buflen, size_t const offset, T& t) { return unserialize_helper(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize2(const T& t, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize2(const void* const buf, size_t const buflen, size_t const offset, T& t) { return unserialize_helper(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize4(const T& t, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize4(const void* const buf, size_t const buflen, size_t const offset, T& t) { return unserialize_helper(buf, buflen, offset, t); } template GU_FORCE_INLINE size_t serialize8(T const t, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(t, buf, buflen, offset); } template GU_FORCE_INLINE size_t unserialize8(const void* const buf, size_t const buflen, size_t const offset, T& t) { return unserialize_helper(buf, buflen, offset, t); } /* * Templates to serialize arbitrary length buffers */ class RepresentationException : public Exception { public: RepresentationException(size_t need, size_t have); }; template inline size_t serial_size_helper(const Buffer& sb) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, 
must_be_integer); if (gu_unlikely(sb.size() > std::numeric_limits::max())) throw RepresentationException(sb.size(), sizeof(ST)); return sizeof(ST) + sb.size(); } GU_FORCE_INLINE size_t serial_size1(const Buffer& sb) { return serial_size_helper(sb); } GU_FORCE_INLINE size_t serial_size2(const Buffer& sb) { return serial_size_helper(sb); } GU_FORCE_INLINE size_t serial_size4(const Buffer& sb) { return serial_size_helper(sb); } GU_FORCE_INLINE size_t serial_size8(const Buffer& sb) { return serial_size_helper(sb); } template inline size_t serialize_helper(const Buffer& b, void* const buf, size_t const buflen, size_t offset) { size_t const ret(offset + serial_size_helper(b)); gu_trace(check_bounds(ret, buflen)); offset = serialize_helper(static_cast(b.size()), buf, buflen, offset); // can't use void* in std::copy() byte_t* const ptr(static_cast(buf)); std::copy(b.begin(), b.end(), ptr + offset); return ret; } template inline size_t unserialize_helper(const void* const buf, size_t const buflen, size_t offset, Buffer& b) { GU_COMPILE_ASSERT(std::numeric_limits::is_integer, must_be_integer); ST len(0); size_t ret(offset + sizeof(len)); gu_trace(check_bounds(ret, buflen)); offset = unserialize_helper(buf, buflen, offset, len); ret += len; gu_trace(check_bounds(ret, buflen)); b.resize(len); // can't use void* in std::copy() const byte_t* const ptr(static_cast(buf)); std::copy(ptr + offset, ptr + ret, b.begin()); return ret; } GU_FORCE_INLINE size_t serialize1(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize1(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return unserialize_helper(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize2(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize2(const void* const buf, size_t 
const buflen, size_t const offset, Buffer& b) { return unserialize_helper(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize4(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize4(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return unserialize_helper(buf, buflen, offset, b); } GU_FORCE_INLINE size_t serialize8(const Buffer& b, void* const buf, size_t const buflen, size_t const offset) { return serialize_helper(b, buf, buflen, offset); } GU_FORCE_INLINE size_t unserialize8(const void* const buf, size_t const buflen, size_t const offset, Buffer& b) { return unserialize_helper(buf, buflen, offset, b); } } // namespace gu #endif // GU_SERIALIZE_HPP galera-4-26.4.25/galerautils/src/gu_rset.cpp000644 000164 177776 00000044023 15107057155 021776 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2013 Codership Oy */ /*! * @file common RecordSet implementation * * Record set is a collection of serialized records of the same type. * * It stores them in an iovec-like collection of buffers before sending * and restores from a single buffer when receiving. 
* * $Id$ */ #include "gu_rset.hpp" #include "gu_vlq.hpp" #include "gu_hexdump.hpp" #include "gu_throw.hpp" #include "gu_logger.hpp" #include "gu_serialize.hpp" #include "gu_hash.h" #include "gu_limits.h" #include namespace gu { int RecordSet::check_size (RecordSet::CheckType const ct) { switch (ct) { case RecordSet::CHECK_NONE: return 0; case RecordSet::CHECK_MMH32: return 4; case RecordSet::CHECK_MMH64: return 8; case RecordSet::CHECK_MMH128: return 16; #define MAX_CHECKSUM_SIZE 16 } log_fatal << "Non-existing RecordSet::CheckType value: " << ct; abort(); } #define VER1_2_CRC_SIZE sizeof(uint32_t) static inline int header_size_max_v1() { return 1 + /* version + checksum type */ 9 + /* max payload size in vlq format */ 9 + /* max record count in vlq format */ VER1_2_CRC_SIZE; /* header checksum */ } #define VER2_ALIGNMENT gu::RecordSet::VER2_ALIGNMENT static inline int header_size_max_v2() { int const ret( 1 + /* version + checksum type */ 9 + /* max payload size in vlq format */ 9 + /* max record count in vlq format */ 1 + /* alignment padding */ VER1_2_CRC_SIZE /* header checksum */ ); GU_COMPILE_ASSERT((ret % VER2_ALIGNMENT) == 0, bad_max_size); return ret; } inline int RecordSetOutBase::header_size_max() const { switch (version()) { case EMPTY: assert (0); break; case VER1: return header_size_max_v1(); case VER2: return header_size_max_v2(); } log_fatal << "Unsupported RecordSet::Version value: " << version(); abort(); } template inline int header_size_v1_2(ssize_t size, int const count) { int hsize(VER2 ? 
header_size_max_v2() : header_size_max_v1()); assert (size > hsize); assert (count > 0); /* need to converge on the header size as it depends on the total size */ do { int new_hsize = 1 + /* version + checksum type */ uleb128_size(size) + /* size in vlq format */ uleb128_size(count) + /* count in vlq format */ VER1_2_CRC_SIZE; /* header checksum */ if (VER2) new_hsize = GU_ALIGN(new_hsize, VER2_ALIGNMENT); assert (new_hsize <= hsize); if (new_hsize == hsize) break; size -= hsize - new_hsize; hsize = new_hsize; } while (true); assert (hsize > 0); assert (size > hsize); return hsize; } static int header_size_v1(ssize_t size, ssize_t const count) { return header_size_v1_2(size, count); } /* * Since in VER2 we want everything to be aligned to 8 bytes, we can have * an important optimization for smaller sets by fitting count and size * into bytes 1-3 of the header, thus fitting whole header into 8 bytes: * * | BYTE 0 | BYTE 1 | BYTE 2 | BYTE 3 | 4 bytes * | VERSION BYTE | COUNT BITS | SIZE BITS | header checksum * * Optimizing for maximum count of 16-byte records, so size = count*16. * This will allow to encode up to 1K records of total 16K size. This is * more than can be represented by VLQ in 1 and 2 bytes respectively: * 127 records of total 16K-1 size. * * Assuming little-endian encoding. */ #define VER2_COUNT_SIZE_LEN 24 /* total bits avaiable in short version */ #define VER2_COUNT_LEN ((VER2_COUNT_SIZE_LEN - 4) / 2) /* bits for count */ /* max count value we can encode in VER2_COUNT_LEN bits [1, ...] */ #define VER2_COUNT_MAX (1 << VER2_COUNT_LEN) #define VER2_COUNT_OFF 8 /* count offset: 8 bits of byte 0 */ #define VER2_COUNT_MASK ((VER2_COUNT_MAX - 1) << VER2_COUNT_OFF) #define VER2_COUNT(h) ((((h) & VER2_COUNT_MASK) >> VER2_COUNT_OFF) + 1) #define VER2_SIZE_LEN (VER2_COUNT_SIZE_LEN - VER2_COUNT_LEN) /* bits for size*/ /* max size value we can encode in VER2_SIZE_LEN bits [1, ...] 
*/ #define VER2_SIZE_MAX (1 << VER2_SIZE_LEN) #define VER2_SIZE_OFF (VER2_COUNT_OFF + VER2_COUNT_LEN) /* size offset*/ #define VER2_SIZE_MASK ((VER2_SIZE_MAX - 1) << VER2_SIZE_OFF) #define VER2_SIZE(h) ((((h) & VER2_SIZE_MASK) >> VER2_SIZE_OFF) + 1) #define VER2_REDUCTION (2 * VER2_ALIGNMENT) #define VER2_SHORT_FLAG 0x08 /* flag to distinguish between short and long ver */ static int header_size_v2(ssize_t const size, int const count) { assert(count > 0); // should never send empty recordsets /* if we potentially can fit count and size in 3 bytes * header (and the whole set) can be shortened by 16 */ bool const can_reduce((count <= VER2_COUNT_MAX) && ((size - VER2_REDUCTION) <= VER2_SIZE_MAX)); if (can_reduce) { return header_size_max_v2() - VER2_REDUCTION; } else { return header_size_v1_2(size, count); } } inline int RecordSetOutBase::header_size() const { switch (version()) { case EMPTY: assert(0); break; case VER1: return header_size_v1(size_, count_); case VER2: return header_size_v2(size_, count_); } log_fatal << "Unsupported RecordSet::Version value: " << version(); abort(); } ssize_t RecordSetOutBase::write_header (byte_t* const buf, ssize_t const size) { assert((uintptr_t(buf) % GU_WORD_BYTES) == 0); int const csize(check_size(check_type())); assert((csize % alignment()) == 0); assert (header_size_max() + csize <= size); int const hdr_size(header_size()); ssize_t const hdr_offset(header_size_max() - hdr_size); assert (hdr_offset >= 0); assert ((hdr_offset % alignment()) == 0); size_ -= hdr_offset; int off(hdr_offset); /* Version byte: upper 4 bits: version, lower 3 - checksum type */ byte_t ver_byte((byte_t(version()) << 4) | (byte_t(check_type()) & 0x07)); assert(0 == (ver_byte & VER2_SHORT_FLAG)); switch (version()) { case VER2: if (VER2_REDUCTION == off) /* 4 byte header version */ { /* comparison above is a valid condition only if VER2_SIZE_MAX is * greater than 0x3fff, otherwise there may be ambiguity about the * two encoding methods */ 
GU_COMPILE_ASSERT(VER2_SIZE_MAX > 0x3FFF, fix_condition); assert(count_ <= VER2_COUNT_MAX); assert(size_ <= VER2_SIZE_MAX); assert(uintptr_t(buf + off)%sizeof(uint32_t) == 0); uint32_t const h((uint32_t(size_ - 1) << VER2_SIZE_OFF) | (uint32_t(count_- 1) << VER2_COUNT_OFF) | (ver_byte | VER2_SHORT_FLAG)); gu::serialize4(h, buf, off); assert(off + 8 == header_size_max()); break; } else /* long header version */ { /* zero up potential padding bytes */ ::memset(buf + off + 4, 0, hdr_size - 8); } /* fall through *//* to uleb encoding */ case VER1: buf[off] = ver_byte; off += 1; off += uleb128_encode(size_, buf + off, size - off); uleb128_encode(count_, buf + off, size - off); break; case EMPTY: assert(0); } assert(off <= header_size_max() - 4); off = hdr_offset + hdr_size - 4; // compensate for padding gap in VER2 /* write header CRC */ uint32_t const crc(gu_fast_hash32(buf + hdr_offset, off - hdr_offset)); off = gu::serialize4(crc, buf, off); assert((off % alignment()) == 0); assert(header_size_max() == off); /* append payload checksum */ if (check_type() != CHECK_NONE) { assert (csize <= size - off); check_.append (buf + hdr_offset, off - hdr_offset); /* append header */ check_.gather (buf + off, csize); } return hdr_offset; } ssize_t RecordSetOutBase::gather (GatherVector& out) { if (count_) { assert(count_ > 0); assert(size_ > 0); #ifndef NDEBUG ssize_t const saved_size(size_); #endif /* NDEBUG */ unsigned int pad_size(0); if (gu_likely(VER2 == version())) { /* make sure size_ is padded to multiple of VER2_ALIGNMENT */ int const dangling_bytes(size_ % VER2_ALIGNMENT); if(dangling_bytes) { assert(dangling_bytes < VER2_ALIGNMENT); pad_size = VER2_ALIGNMENT - dangling_bytes; bool new_page; byte_t* const pad_ptr(alloc(pad_size, new_page)); /* zero up padding bytes to pacify valgrind: * these bytes are checksummed along with the rest of the set * and it makes valgrind unhappy if they are not initialized. 
* However they don't need to be initialized to anything specific * - they just need to remain unaltered */ ::memset(pad_ptr, 0, pad_size); post_append(new_page, pad_ptr, pad_size); // note that size_ should be preserved and not increased here assert(saved_size == size_); } } byte_t* const ptr (static_cast(const_cast(bufs_->front().ptr))); ssize_t const offset = write_header (ptr, bufs_->front().size); bufs_->front().ptr = ptr + offset; bufs_->front().size -= offset; // size_ is taken care of in write_header() out->insert (out->end(), bufs_->begin(), bufs_->end()); assert(((size_ + pad_size) % alignment()) == 0); return size_ + pad_size; } else { return 0; } } static inline byte_t rset_alignment(RecordSet::Version ver) { return (ver >= RecordSet::VER2 ? VER2_ALIGNMENT : 1); } RecordSet::RecordSet (Version ver, CheckType const ct) : size_ (0), count_ (0), version_ (ver), check_type_(ct), alignment_ (rset_alignment(ver)) { assert(uint(version_) <= MAX_VERSION); assert(uint(check_type_) < VER2_SHORT_FLAG); } RecordSetOutBase::RecordSetOutBase (byte_t* reserved, size_t reserved_size, const BaseName& base_name, CheckType const ct, Version const version #ifdef GU_RSET_CHECK_SIZE ,ssize_t const max_size #endif ) : RecordSet (version, ct), #ifdef GU_RSET_CHECK_SIZE max_size_ (max_size), #endif alloc_ (base_name, reserved, reserved_size), check_ (), bufs_ (), prev_stored_(true) { /* reserve space for header */ size_ = header_size_max() + check_size(check_type()); bool unused; byte_t* ptr(alloc_.alloc (size_, unused)); assert(0 == uintptr_t(ptr) % GU_WORD_BYTES); Buf b = { ptr, size_ }; bufs_->push_back (b); } static inline RecordSet::Version header_version (const byte_t* buf, ssize_t const size) { assert (NULL != buf); assert (size > 0); uint const ver((buf[0] & 0xf0) >> 4); assert (ver > 0); if (gu_likely(ver <= RecordSet::MAX_VERSION)) return RecordSet::Version(ver); gu_throw_error (EPROTO) << "Unsupported RecordSet version: " << ver; } static inline RecordSet::CheckType 
header_check_type(RecordSet::Version const ver, const byte_t* ptr, ssize_t const size) { assert (size > 0); switch (ver) { case RecordSet::EMPTY: assert(0); return RecordSet::CHECK_NONE; case RecordSet::VER1: case RecordSet::VER2: { int const ct(ptr[0] & 0x07); switch (ct) { case RecordSet::CHECK_NONE: return RecordSet::CHECK_NONE; case RecordSet::CHECK_MMH32: if (RecordSet::VER2 == ver) break; return RecordSet::CHECK_MMH32; case RecordSet::CHECK_MMH64: return RecordSet::CHECK_MMH64; case RecordSet::CHECK_MMH128: return RecordSet::CHECK_MMH128; } gu_throw_error (EPROTO) << "Unsupported RecordSet checksum type: " << ct; } } gu_throw_error (EPROTO) << "Unsupported RecordSet version: " << ver; } void RecordSet::init (const byte_t* const ptr, ssize_t const size) { assert (EMPTY == version_); assert (size >= 0); assert (NULL != ptr || 0 == size); assert (NULL == ptr || 0 != size); if (gu_likely ((ptr && size))) { version_ = header_version (ptr, size); check_type_ = header_check_type (Version(version_), ptr, size); alignment_ = rset_alignment (Version(version_)); } } static inline size_t read_size_count_v1_2(const byte_t* head_, size_t const size, size_t off, ssize_t& size_, int& count_) { off += uleb128_decode (head_ + off, size - off, size_); off += uleb128_decode (head_ + off, size - off, count_); return off; } static inline size_t read_size_count_v2_short(const byte_t* head_, ssize_t& size_, int& count_) { uint32_t const h(gu_le32(*reinterpret_cast(head_))); size_ = VER2_SIZE (h); count_ = VER2_COUNT(h); return sizeof(h); } #define MIN_HEADER_SIZE 8 // it can't be smaller void RecordSetInBase::parse_header_v1_2 (size_t const size) { assert (size > 8); assert (EMPTY != version()); assert (0 != alignment()); size_t off; if (VER2 == version() && (head_[0] & VER2_SHORT_FLAG)) { off = read_size_count_v2_short(head_, size_, count_); } else { off = read_size_count_v1_2(head_, size, 1, size_, count_); off = GU_ALIGN((off + VER1_2_CRC_SIZE), alignment()); // end of header off 
-= VER1_2_CRC_SIZE; // header checksum } if (gu_unlikely(static_cast(size_) > size)) { gu_throw_error (EPROTO) << "RecordSet size " << size_ << " exceeds buffer size " << size << "\nfirst 4 bytes: " << gu::Hexdump(head_, 4); } if (gu_unlikely(static_cast(size_) < static_cast(count_))) { gu_throw_error (EPROTO) << "Corrupted RecordSet header: count " << count_ << " exceeds size " << size_; } /* verify header CRC */ uint32_t const crc_comp(gu_fast_hash32(head_, off)); uint32_t crc_orig; unserialize4(head_, off, crc_orig); if (gu_unlikely(crc_comp != crc_orig)) { gu_throw_error (EPROTO) << "RecordSet header CRC mismatch: " << std::showbase << std::internal << std::hex << std::setfill('0') << std::setw(10) << "\ncomputed: " << crc_comp << "\nfound: " << crc_orig << std::dec; } off += VER1_2_CRC_SIZE; assert((off % alignment()) == 0); /* checksum is between header and records */ begin_ = off + check_size(check_type()); } /* returns false if checksum matched and true if failed */ void RecordSetInBase::checksum() const { int const cs(check_size(check_type())); if (cs > 0) /* checksum records */ { Hash check; check.append (head_ + begin_, serial_size() - begin_); /* records */ check.append (head_, begin_ - cs); /* header */ assert(cs <= MAX_CHECKSUM_SIZE); byte_t result[MAX_CHECKSUM_SIZE]; check.gather(result); const byte_t* const stored_checksum(head_ + begin_ - cs); if (gu_unlikely(memcmp (result, stored_checksum, cs))) { gu_throw_error(EINVAL) << "RecordSet checksum does not match:" << "\ncomputed: " << gu::Hexdump(result, cs) << "\nfound: " << gu::Hexdump(stored_checksum, cs); } } } uint64_t RecordSetInBase::get_checksum() const { unsigned int const checksum_size(check_size(check_type())); const void* const stored_checksum(head_ + begin_ - checksum_size); uint64_t ret(0); if (checksum_size >= sizeof(uint64_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= sizeof(uint32_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= 
sizeof(uint16_t)) ret = *(static_cast(stored_checksum)); else if (checksum_size >= sizeof(uint8_t)) ret = *(static_cast(stored_checksum)); return gu::gtoh(ret); } RecordSetInBase::RecordSetInBase (const byte_t* const ptr, size_t const size, bool const check_now) : RecordSet (), head_ (), next_ (), begin_ () { init (ptr, size, check_now); } void RecordSetInBase::init (const byte_t* const ptr, size_t const size, bool const check_now) { assert (EMPTY == version()); RecordSet::init (ptr, size); head_ = ptr; switch (version()) { case EMPTY: return; case VER1: case VER2: assert(0 != alignment()); if (alignment() > 1) assert((uintptr_t(head_) % GU_WORD_BYTES) == 0); parse_header_v1_2(size); // should set begin_ } if (check_now) checksum(); next_ = begin_; assert (size_ > 0); assert (count_ >= 0); assert (count_ <= size_); assert (begin_ > 0); assert (begin_ <= size_); assert (next_ == begin_); } void RecordSetInBase::throw_error (Error code) const { switch (code) { case E_PERM: gu_throw_error (EPERM) << "Access beyond record set end."; case E_FAULT: gu_throw_error (EFAULT) << "Corrupted record set: record extends " << next_ << " beyond set boundary " << size_; } log_fatal << "Unknown error in RecordSetIn."; abort(); } } /* namespace gu */ galera-4-26.4.25/galerautils/src/gu_unordered.hpp000644 000164 177776 00000015673 15107057155 023026 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2010 Codership Oy // //! // @file gu_unordered.hpp unordered_[multi]map definition // // We still have environments where neither boost or std unordered // stuff is available. Wrapper classes are provided for alternate // implementations with standard semantics. 
// // For usage see either boost or tr1 specifications for unordered_[multi]map // #ifndef GU_UNORDERED_HPP #define GU_UNORDERED_HPP #if defined(HAVE_STD_UNORDERED_MAP) #include #include #define GU_UNORDERED_MAP_NAMESPACE std #elif defined(HAVE_TR1_UNORDERED_MAP) #include #include #define GU_UNORDERED_MAP_NAMESPACE std::tr1 #elif defined(HAVE_BOOST_UNORDERED_MAP_HPP) #include #include #define GU_UNORDERED_MAP_NAMESPACE boost #else #error "no unordered map available" #endif #include "gu_throw.hpp" namespace gu { template class UnorderedHash { public: typedef GU_UNORDERED_MAP_NAMESPACE::hash Type; size_t operator()(const K& k) const { return Type()(k); } }; template size_t HashValue(const K& key) { return UnorderedHash()(key); } template , class P = std::equal_to, class A = std::allocator > class UnorderedSet { typedef GU_UNORDERED_MAP_NAMESPACE::unordered_set type; type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedSet() : impl_() { } explicit UnorderedSet(A a) : impl_(a) { } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } std::pair insert(const value_type& k) { return impl_.insert(k); } iterator insert_unique(const value_type& k) { std::pair ret(insert(k)); if (ret.second == false) gu_throw_fatal << "insert unique failed"; return ret.first; } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } iterator erase(iterator i) { return impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } void clear() { impl_.clear(); } void rehash(size_t n) { impl_.rehash(n); } }; template , class P = std::equal_to, class A = std::allocator > class UnorderedMultiset { typedef 
GU_UNORDERED_MAP_NAMESPACE::unordered_multiset type; type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedMultiset() : impl_() { } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } iterator insert(const value_type& k) { return impl_.insert(k); } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } std::pair equal_range(const K& key) { return impl_.equal_range(key); } std::pair equal_range(const K& key) const { return impl_.equal_range(key); } iterator erase(iterator i) { return impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } void clear() { impl_.clear(); } void rehash(size_t n) { impl_.rehash(n); } }; template , class P = std::equal_to, class A = std::allocator > > class UnorderedMap { typedef GU_UNORDERED_MAP_NAMESPACE::unordered_map type; type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedMap() : impl_() { } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } std::pair insert(const std::pair& kv) { return impl_.insert(kv); } iterator insert_unique(const std::pair& kv) { std::pair ret(insert(kv)); if (ret.second == false) gu_throw_fatal << "insert unique failed"; return ret.first; } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } iterator erase(iterator i) { return impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return 
impl_.empty(); } void clear() { impl_.clear(); } void rehash(size_t n) { impl_.rehash(n); } }; template > class UnorderedMultimap { typedef GU_UNORDERED_MAP_NAMESPACE::unordered_multimap type; type impl_; public: typedef typename type::value_type value_type; typedef typename type::iterator iterator; typedef typename type::const_iterator const_iterator; UnorderedMultimap() : impl_() { } void clear() { impl_.clear(); } iterator begin() { return impl_.begin(); } const_iterator begin() const { return impl_.begin(); } iterator end() { return impl_.end(); } const_iterator end() const { return impl_.end(); } iterator insert(const std::pair& kv) { return impl_.insert(kv); } iterator find(const K& key) { return impl_.find(key); } const_iterator find(const K& key) const { return impl_.find(key); } std::pair equal_range(const K& key) { return impl_.equal_range(key); } std::pair equal_range(const K& key) const { return impl_.equal_range(key); } void erase(iterator i) { impl_.erase(i); } size_t size() const { return impl_.size(); } bool empty() const { return impl_.empty(); } }; } #undef GU_UNORDERED_MAP_NAMESPACE #endif // GU_UNORDERED_HPP galera-4-26.4.25/galerautils/src/gu_utils.c000644 000164 177776 00000005110 15107057155 021613 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2023 Codership Oy /** * @file Miscellaneous utility functions * * $Id$ */ #include "gu_utils.h" #include #include #include #include #include #include const char* gu_str2ll (const char* str, long long* ll) { char* ret; int shift = 0; long long llret = strtoll (str, &ret, 0); switch (ret[0]) { case 't': case 'T': shift += 10; /* fall through */ case 'g': case 'G': shift += 10; /* fall through */ case 'm': case 'M': shift += 10; /* fall through */ case 'k': case 'K': shift += 10; ret++; { long long const sign = (llret < 0 ? 
-1 : 1); unsigned long long ullret = sign * llret; if (ullret == ((ullret << (shift + 1)) >> (shift + 1))) { ullret <<= shift; llret = ullret; llret *= sign; } else { /* ERANGE */ if (llret > 0) llret = LLONG_MAX; else llret = LLONG_MIN; errno = ERANGE; } } /* fall through */ default: *ll = llret; } return ret; } const char* gu_str2dbl (const char* str, double* dbl) { char* ret; *dbl = strtod (str, &ret); return ret; } const char* gu_str2bool (const char* str, bool* b) { size_t const len = strlen(str); int res = -1; /* no conversion */ switch (len) { case 1: switch (str[0]) { case '0': case 'N': case 'n': res = 0; break; case '1': case 'Y': case 'y': res = 1; break; } break; case 2: if (!strcasecmp(str, "on")) res = 1; else if (!strcasecmp(str, "no")) res = 0; break; case 3: if (!strcasecmp(str, "off")) res = 0; else if (!strcasecmp(str, "yes")) res = 1; else if (!strcasecmp(str, "yep")) res = 1; break; case 4: if (!strcasecmp(str, "true")) res = 1; else if (!strcasecmp(str, "sure")) res = 1; else if (!strcasecmp(str, "none")) res = 0; else if (!strcasecmp(str, "nope")) res = 0; else if (!strcasecmp(str, "yeah")) res = 1; break; case 5: if (!strcasecmp(str, "false")) res = 0; break; } *b = (res > 0); return (res >= 0) ? 
(str + len) : str; } const char* gu_str2ptr (const char* str, void** ptr) { char* ret; *ptr = (void*) (intptr_t)strtoll (str, &ret, 16); return ret; } galera-4-26.4.25/galerautils/src/gu_uuid.hpp000644 000164 177776 00000011321 15107057155 021767 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014-2017 Codership Oy * */ #ifndef _gu_uuid_hpp_ #define _gu_uuid_hpp_ #include "gu_uuid.h" #include "gu_arch.h" // GU_ASSERT_ALIGNMENT #include "gu_assert.hpp" #include "gu_macros.hpp" #include "gu_buffer.hpp" #include "gu_exception.hpp" #include "gu_serialize.hpp" // check_range() #include "gu_utils.hpp" // ptr_offset() #include #include inline bool operator==(const gu_uuid_t& a, const gu_uuid_t& b) { return gu_uuid_compare(&a, &b) == 0; } inline bool operator!=(const gu_uuid_t& a, const gu_uuid_t& b) { return !(a == b); } inline std::ostream& operator<<(std::ostream& os, const gu_uuid_t& uuid) { char uuid_buf[GU_UUID_STR_LEN + 1]; ssize_t ret(gu_uuid_print(&uuid, uuid_buf, sizeof(uuid_buf))); (void)ret; assert(ret == GU_UUID_STR_LEN); uuid_buf[GU_UUID_STR_LEN] = '\0'; return (os << uuid_buf); } namespace gu { class UUIDScanException : public Exception { public: UUIDScanException(const std::string& s); }; } inline ssize_t gu_uuid_from_string(const std::string& s, gu_uuid_t& uuid) { ssize_t ret(gu_uuid_scan(s.c_str(), s.size(), &uuid)); if (gu_unlikely(ret == -1)) throw gu::UUIDScanException(s); return ret; } inline std::istream& operator>>(std::istream& is, gu_uuid_t& uuid) { char str[GU_UUID_STR_LEN + 1]; is.width(GU_UUID_STR_LEN + 1); is >> str; gu_uuid_from_string(str, uuid); return is; } namespace gu { class UUID_base; class UUID; } /* This class should not be used directly. It is here to allow * gu::UUID and gcomm::UUID to inherit from it without the virtual table (* overhead. 
*/ class gu::UUID_base { public: UUID_base() : uuid_(GU_UUID_NIL) {} UUID_base(const void* const node, const size_t node_len) : uuid_() { gu_uuid_generate(&uuid_, node, node_len); } UUID_base(gu_uuid_t uuid) : uuid_(uuid) {} class SerializeException : public Exception { public: SerializeException(size_t need, size_t have); }; static size_t serial_size() { return sizeof(UUID_base().uuid_); } size_t unserialize(const void* const buf, const size_t offset) { size_t const len(serial_size()); ::memcpy(&uuid_, ptr_offset(buf, offset), len); return offset + len; } size_t serialize (void* const buf, const size_t offset) const { size_t const len(serial_size()); ::memcpy(ptr_offset(buf, offset), &uuid_, len); return offset + len; } size_t unserialize(const void* const buf, const size_t buflen, const size_t offset) { gu_trace(gu::check_bounds(offset + serial_size(), buflen)); return unserialize(buf, offset); } size_t serialize (void* const buf, const size_t buflen, const size_t offset) const { gu_trace(gu::check_bounds(offset + serial_size(), buflen)); return serialize(buf, offset); } const gu_uuid_t* ptr() const { return &uuid_; } GU_FORCE_INLINE UUID_base& operator=(const UUID_base& u) { gu_uuid_copy(&uuid_, &u.uuid_); return *this; } bool operator<(const UUID_base& cmp) const { return (gu_uuid_compare(&uuid_, &cmp.uuid_) < 0); } bool operator==(const gu_uuid_t& cmp) const { return (gu_uuid_compare(&uuid_, &cmp) == 0); } bool operator!=(const gu_uuid_t& cmp) const { return !(*this == cmp); } bool operator==(const UUID_base& cmp) const { return (gu_uuid_compare(&uuid_, &cmp.uuid_) == 0); } bool operator!=(const UUID_base& cmp) const { return !(*this == cmp); } bool older(const UUID_base& cmp) const { return (gu_uuid_older(&uuid_, &cmp.uuid_) > 0); } std::ostream& print(std::ostream& os) const { return (os << uuid_); } std::istream& scan(std::istream& is) { return (is >> uuid_); } const gu_uuid_t& operator()() const { return uuid_; } protected: ~UUID_base() {} gu_uuid_t uuid_; 
private: GU_COMPILE_ASSERT(sizeof(gu_uuid_t) == GU_UUID_LEN, UUID_size); }; /* class UUID_base */ class gu::UUID : public UUID_base { public: UUID() : UUID_base() {} UUID(const void* node, const size_t node_len) : UUID_base(node, node_len) {} UUID(gu_uuid_t uuid) : UUID_base(uuid) {} }; /* class UUID */ namespace gu { inline std::ostream& operator<< (std::ostream& os, const gu::UUID_base& uuid) { uuid.print(os); return os; } inline std::istream& operator>> (std::istream& is, gu::UUID_base& uuid) { uuid.scan(is); return is; } } /* namespace gu */ #endif // _gu_uuid_hpp_ galera-4-26.4.25/galerautils/CMakeLists.txt000644 000164 177776 00000000152 15107057155 021566 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_subdirectory(src) add_subdirectory(tests) galera-4-26.4.25/galerautils/doc/000755 000164 177776 00000000000 15107057160 017571 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/galerautils/doc/Doxyfile000644 000164 177776 00000143667 15107057155 021324 0ustar00jenkinsnogroup000000 000000 # Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GCS # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. 
PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). 
USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. 
FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. 
# If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. 
func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. 
When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. 
Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. 
GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. 
Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. 
Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h *.hpp # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. 
EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. 
The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. 
The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. 
HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. 
GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. 
USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. 
# Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. 
PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). 
The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. 
TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. 
Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO galera-4-26.4.25/galerautils/README000644 000164 177776 00000001120 15107057155 017702 0ustar00jenkinsnogroup000000 000000 libgalerautils is a library of utilities commonly used by Galera project. Current release includes logging, mutex and malloc debug functions and convenience macros. 
This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY, to the extent permitted by law; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. libgalerautils is free software. Please see the file COPYING for details. For documentation, please see the files in the doc subdirectory. For building and installation instructions please see the INSTALL file. galera-4-26.4.25/galerautils/SConscript000644 000164 177776 00000010452 15107057155 021044 0ustar00jenkinsnogroup000000 000000 # SConscript for building galerautils Import('env', 'machine', 'x86', 'sysname') # # We need to decide on building hardware support for CRC32C at this level # because resulting preprocessor flags are needed in both src/ and tests/ # crc32c_no_hardware = bool(int(ARGUMENTS.get('crc32c_no_hardware', 0))) crc32c_cppflags = '' crc32c_cflags = '' arm64 = False if not crc32c_no_hardware: try: crc32c_check_env = env.Clone() if x86: if sysname == 'sunos': # Ideally we want to simply strip SSE4.2 flag from the resulting # crc32.pic.o # (see http://ffmpeg.org/pipermail/ffmpeg-user/2013-March/013977.html) # but that requires some serious scons-fu, so we just don't # compile hardware support in if host CPU does not have it. 
from subprocess import check_call check_call("isainfo -v | grep sse4.2 >/dev/null 2>&1", shell=True); # raises exception test_cflags = ' -msse4.2' test_source = """ int main() { /* at least 32-bit functions should be present */ (void)__builtin_ia32_crc32qi(0, 0); (void)__builtin_ia32_crc32hi(0, 0); (void)__builtin_ia32_crc32si(0, 0); return 0; } """ elif any(arch in machine for arch in [ 'aarch64', 'arm64' ]): arm64 = True test_cflags = ' -march=armv8-a+crc' test_source = """ // Here we assume that CPU feature detection and // CRC32 support in hardware is the same thing and // if the former is not available, the latter is // not available as well, so we test for both at // the same time #include #include int main() { #if defined(__linux__) (void)getauxval(AT_HWCAP); #elif defined(__FreeBSD__) unsigned long info; (void)elf_aux_info(AT_HWCAP, &info, sizeof(info)); #else #error Hardware feature detection for OS not supported #endif (void)__crc32b(0, 0); (void)__crc32h(0, 0); (void)__crc32w(0, 0); (void)__crc32d(0, 0); return 0; } """ else: raise Exception('Unsupported architecture: ' + machine) def CheckCompilerSupport(context, test_source): context.Message('Checking for hardware CRC32C support by compiler... 
') result = context.TryLink(test_source, '.c') context.Result(result) if not result: raise Exception('Compiler does not support hardware CRC32C intrinsics') crc32c_check_env.Append(CFLAGS = test_cflags) conf = Configure(crc32c_check_env, custom_tests = {'CheckCompilerSupport': CheckCompilerSupport }) conf.CheckCompilerSupport(test_source) # raises exception crc32c_cflags = test_cflags except Exception as e: # from traceback import print_exc # print_exc() crc32c_no_hardware = True nohw_reason = str(e) else: nohw_reason = 'command line options' if crc32c_no_hardware: print('Hardware CRC32C support disabled: ' + nohw_reason) crc32c_cppflags = ' -DGU_CRC32C_NO_HARDWARE' print("CRC32C config" + ": crc32c_no_hardware='" + str(crc32c_no_hardware) + "', crc32c_cppflags='" + crc32c_cppflags + "', crc32c_cflags='" + crc32c_cflags + "'" ) Export('arm64', 'crc32c_no_hardware', 'crc32c_cppflags', 'crc32c_cflags') SConscript(Split('''src/SConscript tests/SConscript''')) galera-4-26.4.25/galerautils/ChangeLog000644 000164 177776 00000002702 15107057155 020603 0ustar00jenkinsnogroup000000 000000 2009-09-20 Alex Added RegEx class for matching strings with POSIX regular expressions. Renamed URL class to URI to better reflect what it does. Added get_host(), get_user() and get_port() methods and a unit test. Modularized galerautils++ unit tests. Version 0.3.5 2009-09-17 Alex Added gu_utils.hpp to hold general-purpose templates and functions (now with to_string() template functions). Logger class cleanups. Exception class cleanups. Added stack tracing macro. New Throw class for composing verbose exception messages. Version 0.3.4 2009-09-01 Alex Added a simple option line parser. Some optimizations and cleanups. Version 0.3.3 2009-07-07 Alex Slightly changed gu_fifo interface. Added gu_lock_step object. Version 0.3.2. 2009-06-21 Alex Moved TO monitor module from GCS to galerautils. Version 0.3.1. 2009-06-08 Alex Started galerautils++ project. 
Added galerautils.hpp and C++-style logger and assert variants. Version 0.3.0. 2008-11-16 Alex Added gu_fifo_t class for mallocless FIFO queue. Version 0.2.9. 2008-03-23 Alex Added gu_timeval_diff() and gu_clock_diff() functions. Bumped interface version. 2008-02-21 Teemu Made DBUG thread safe. 2007-11-01 Alex Fixed thread safe compilation without MySQL Tagged release 0.2.5 2007-10-18 Alex Fixed compilation. Added gtohl/htogl/gtohs/htogs functions. Tagged release 0.2.4 galera-4-26.4.25/gcomm/000755 000164 177776 00000000000 15107057160 015612 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcomm/src/000755 000164 177776 00000000000 15107057160 016401 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcomm/src/gmcast.cpp000644 000164 177776 00000172675 15107057155 020411 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #include "gmcast.hpp" #include "gmcast_proto.hpp" #include "gcomm/common.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "gcomm/map.hpp" #include "defaults.hpp" #include "gu_convert.hpp" #include "gu_resolver.hpp" #include "gu_asio.hpp" // gu::conf::use_ssl using namespace std::rel_ops; using gcomm::gmcast::Proto; using gcomm::gmcast::ProtoMap; using gcomm::gmcast::Link; using gcomm::gmcast::LinkMap; using gcomm::gmcast::Message; const long gcomm::GMCast::max_retry_cnt_(std::numeric_limits::max()); static void set_tcp_defaults (gu::URI* uri) { // what happens if there is already this parameter? uri->set_option(gcomm::Conf::TcpNonBlocking, gu::to_string(1)); } static bool check_tcp_uri(const gu::URI& uri) { return (uri.get_scheme() == gu::scheme::tcp || uri.get_scheme() == gu::scheme::ssl); } static std::string get_scheme(bool use_ssl, bool dynamic_socket) { if (use_ssl == true && not dynamic_socket) { return gu::scheme::ssl; } return gu::scheme::tcp; } // // Check if the node should stay isolated. 
// Possible outcomes: // * Return false, node should continue reconnecting and accepting connections // (isolate = 0) // * Return true, node should remain isolated (isolate = 1) // * Throw fatal exception to terminate the backend (isolate = 2) // static inline bool is_isolated(int isolate) { switch (isolate) { case 1: return true; case 2: gu_throw_fatal<< "Gcomm backend termination was " << "requested by setting gmcast.isolate=2."; break; default: break; } return false; } gcomm::GMCast::GMCast(Protonet& net, const gu::URI& uri, const UUID* my_uuid) : Transport (net, uri), version_(check_range(Conf::GMCastVersion, param(conf_, uri, Conf::GMCastVersion, "0"), 0, max_version_ + 1)), segment_ (check_range(Conf::GMCastSegment, param(conf_, uri, Conf::GMCastSegment, "0"), 0, 255)), my_uuid_ (my_uuid ? *my_uuid : UUID(0, 0)), dynamic_socket_ (conf_.has(gu::conf::socket_dynamic) ? param(conf_, uri, gu::conf::socket_dynamic, "false") : false), #ifdef GALERA_HAVE_SSL use_ssl_ (param(conf_, uri, gu::conf::use_ssl, "false")), #else use_ssl_(), #endif // GALERA_HAVE_SSL // @todo: technically group name should be in path component group_name_ (param(conf_, uri, Conf::GMCastGroup, "")), listen_addr_ ( param( conf_, uri, Conf::GMCastListenAddr, get_scheme(use_ssl_, dynamic_socket_) + "://0.0.0.0")), // how to make it IPv6 safe? 
initial_addrs_(), mcast_addr_ (param(conf_, uri, Conf::GMCastMCastAddr, "")), bind_ip_ (""), mcast_ttl_ (check_range( Conf::GMCastMCastTTL, param(conf_, uri, Conf::GMCastMCastTTL, Defaults::GMCastMCastTTL), 1, 256)), listener_ (), mcast_ (), pending_addrs_(), remote_addrs_ (), addr_blacklist_(), relaying_ (false), isolate_ (0), prim_view_reached_(false), proto_map_ (new ProtoMap()), relay_set_ (), segment_map_ (), self_index_ (std::numeric_limits::max()), time_wait_ (param( conf_, uri, Conf::GMCastTimeWait, Defaults::GMCastTimeWait)), check_period_ ("PT0.5S"), peer_timeout_ (param( conf_, uri, Conf::GMCastPeerTimeout, Defaults::GMCastPeerTimeout)), max_initial_reconnect_attempts_( param(conf_, uri, Conf::GMCastMaxInitialReconnectAttempts, gu::to_string(max_retry_cnt_))), next_check_ (gu::datetime::Date::monotonic()) { log_info << "GMCast version " << version_; if (group_name_ == "") { gu_throw_error (EINVAL) << "Group not defined in URL: " << uri_.to_string(); } set_initial_addr(uri_); try { listen_addr_ = uri_.get_option (Conf::GMCastListenAddr); } catch (gu::NotFound&) {} try { gu::URI uri(listen_addr_); /* check validity of the address */ } catch (gu::Exception&) { /* most probably no scheme, try to append one and see if it succeeds */ listen_addr_ = uri_string(get_scheme(use_ssl_, dynamic_socket_), listen_addr_); gu_trace(gu::URI uri(listen_addr_)); } gu::URI listen_uri(listen_addr_); if (check_tcp_uri(listen_uri) == false) { gu_throw_error (EINVAL) << "listen addr '" << listen_addr_ << "' does not specify supported protocol"; } if (gu::net::resolve(listen_uri).get_addr().is_anyaddr() == false) { // bind outgoing connections to the same address as listening. 
gu_trace(bind_ip_ = listen_uri.get_host()); } std::string port(Defaults::GMCastTcpPort); try { port = listen_uri.get_port(); } catch (gu::NotSet&) { // if no listen port is set for listen address in the options, // see if base port was configured try { port = conf_.get(BASE_PORT_KEY); } catch (gu::NotSet&) { // if no base port configured, try port from the connection address try { port = uri_.get_port(); } catch (gu::NotSet&) {} } listen_addr_ += ":" + port; } conf_.set(BASE_PORT_KEY, port); listen_addr_ = gu::net::resolve(listen_addr_).to_string(); // resolving sets scheme to tcp, have to rewrite for ssl if (use_ssl_ == true && not dynamic_socket_) { listen_addr_.replace(0, 3, gu::scheme::ssl); } std::set::iterator iaself(initial_addrs_.find(listen_addr_)); if (iaself != initial_addrs_.end()) { log_debug << "removing own listen address '" << *iaself << "' from initial address list"; initial_addrs_.erase(iaself); } if (mcast_addr_ != "") { try { port = param(conf_, uri_, Conf::GMCastMCastPort, port); } catch (gu::NotFound&) {} mcast_addr_ = gu::net::resolve( uri_string(gu::scheme::udp, mcast_addr_, port)).to_string(); } log_info << self_string() << " listening at " << listen_addr_; log_info << self_string() << " multicast: " << mcast_addr_ << ", ttl: " << mcast_ttl_; conf_.set(Conf::GMCastListenAddr, listen_addr_); conf_.set(Conf::GMCastMCastAddr, mcast_addr_); conf_.set(Conf::GMCastVersion, gu::to_string(version_)); conf_.set(Conf::GMCastTimeWait, gu::to_string(time_wait_)); conf_.set(Conf::GMCastMCastTTL, gu::to_string(mcast_ttl_)); conf_.set(Conf::GMCastPeerTimeout, gu::to_string(peer_timeout_)); conf_.set(Conf::GMCastSegment, gu::to_string(segment_)); } gcomm::GMCast::~GMCast() { if (listener_ != 0) close(); delete proto_map_; } void gcomm::GMCast::set_initial_addr(const gu::URI& uri) { const gu::URI::AuthorityList& al(uri.get_authority_list()); for (gu::URI::AuthorityList::const_iterator i(al.begin()); i != al.end(); ++i) { std::string host; try { host = 
i->host(); } catch (gu::NotSet& ns) { gu_throw_error(EINVAL) << "Unset host in URL " << uri; } if (host_is_any(host)) continue; std::string port; try { port = i->port(); } catch (gu::NotSet&) { try { port = conf_.get(BASE_PORT_KEY); } catch (gu::NotFound&) { port = Defaults::GMCastTcpPort; } catch (gu::NotSet&) { port = Defaults::GMCastTcpPort; } } std::string initial_uri = uri_string(get_scheme(use_ssl_, dynamic_socket_), host, port); std::string initial_addr; try { initial_addr = gu::net::resolve(initial_uri).to_string(); } catch (gu::Exception& ) { log_warn << "Failed to resolve " << initial_uri; continue; } // resolving sets scheme to tcp, have to rewrite for ssl if (use_ssl_ == true && not dynamic_socket_) { initial_addr.replace(0, 3, gu::scheme::ssl); } if (check_tcp_uri(initial_addr) == false) { gu_throw_error (EINVAL) << "initial addr '" << initial_addr << "' is not valid"; } log_debug << self_string() << " initial addr: " << initial_addr; initial_addrs_.insert(initial_addr); } } void gcomm::GMCast::connect_precheck(bool start_prim) { if (!start_prim && initial_addrs_.empty()) { gu_throw_fatal << "No address to connect"; } } void gcomm::GMCast::connect() { pstack_.push_proto(this); log_debug << "gmcast " << uuid() << " connect"; gu::URI listen_uri(listen_addr_); set_tcp_defaults (&listen_uri); listener_ = pnet().acceptor(listen_uri); gu_trace (listener_->listen(listen_uri)); if (!mcast_addr_.empty()) { gu::URI mcast_uri( mcast_addr_ + '?' 
+ gcomm::Socket::OptIfAddr + '=' + gu::URI(listen_addr_).get_host()+'&' + gcomm::Socket::OptNonBlocking + "=1&" + gcomm::Socket::OptMcastTTL + '=' + gu::to_string(mcast_ttl_) ); mcast_ = pnet().socket(mcast_uri); gu_trace(mcast_->connect(mcast_uri)); } if (!initial_addrs_.empty()) { for (std::set::const_iterator i(initial_addrs_.begin()); i != initial_addrs_.end(); ++i) { insert_address(*i, UUID(), pending_addrs_); AddrList::iterator ai(pending_addrs_.find(*i)); AddrList::value(ai).set_max_retries(max_retry_cnt_); gu_trace (gmcast_connect(*i)); } } } void gcomm::GMCast::connect(const gu::URI& uri) { set_initial_addr(uri); connect(); } void gcomm::GMCast::close(bool force) { log_debug << "gmcast " << uuid() << " close"; pstack_.pop_proto(this); if (mcast_) { mcast_->close(); // delete mcast; // mcast = 0; } gcomm_assert(listener_ != 0); listener_->close(); listener_.reset(); segment_map_.clear(); for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { delete ProtoMap::value(i); } proto_map_->clear(); pending_addrs_.clear(); remote_addrs_.clear(); prim_view_reached_ = false; } // // Private // // Find other local endpoint matching to proto static const Proto* find_other_local_endpoint(const gcomm::gmcast::ProtoMap& proto_map, const gcomm::gmcast::Proto* proto) { for (gcomm::gmcast::ProtoMap::const_iterator i(proto_map.begin()); i != proto_map.end(); ++i) { if (i->second != proto && i->second->handshake_uuid() == proto->handshake_uuid()) { return i->second; } } return 0; } // Find other endpoint with same remote UUID static const Proto* find_other_endpoint_same_remote_uuid(const gcomm::gmcast::ProtoMap& proto_map, const gcomm::gmcast::Proto* proto) { for (gcomm::gmcast::ProtoMap::const_iterator i(proto_map.begin()); i != proto_map.end(); ++i) { if (i->second != proto && i->second->remote_uuid() == proto->remote_uuid()) { return i->second; } } return 0; } bool gcomm::GMCast::is_own(const gmcast::Proto* proto) const { assert(proto->remote_uuid() 
!= gcomm::UUID::nil()); if (proto->remote_uuid() != uuid()) { return false; } return find_other_local_endpoint(*proto_map_, proto); } void gcomm::GMCast::blacklist(const gmcast::Proto* proto) { initial_addrs_.erase(proto->remote_addr()); pending_addrs_.erase(proto->remote_addr()); addr_blacklist_.insert(std::make_pair( proto->remote_addr(), AddrEntry(gu::datetime::Date::monotonic(), gu::datetime::Date::monotonic(), proto->remote_uuid()))); } bool gcomm::GMCast::is_not_own_and_duplicate_exists( const Proto* proto) const { assert(proto->remote_uuid() != gcomm::UUID::nil()); const Proto* other(find_other_local_endpoint(*proto_map_, proto)); if (!other) { // Not own // Check if remote UUID matches to self if (proto->remote_uuid() == uuid()) { return true; } // Check if other proto entry with same remote // UUID but different remote address exists. other = find_other_endpoint_same_remote_uuid(*proto_map_, proto); if (other && other->remote_addr() != proto->remote_addr()) { return true; } } return false; } // Erase proto entry in safe manner // 1) Erase from relay_set_ // 2) Erase from proto_map_ // 3) Delete proto entry void gcomm::GMCast::erase_proto(gmcast::ProtoMap::iterator i) { Proto* p(ProtoMap::value(i)); RelayEntry e(p, p->socket().get()); RelaySet::iterator si(relay_set_.find(e)); if (si != relay_set_.end()) { relay_set_.erase(si); } proto_map_->erase(i); delete p; } void gcomm::GMCast::gmcast_accept() { SocketPtr tp; try { tp = listener_->accept(); } catch (gu::Exception& e) { log_warn << e.what(); return; } if (is_isolated(isolate_)) { log_debug << "dropping accepted socket due to isolation"; tp->close(); return; } Proto* peer = new Proto ( *this, version_, tp, listener_->listen_addr(), "", mcast_addr_, segment_, group_name_); std::pair ret = proto_map_->insert(std::make_pair(tp->id(), peer)); if (ret.second == false) { delete peer; gu_throw_fatal << "Failed to add peer to map"; } if (tp->state() == Socket::S_CONNECTED) { peer->send_handshake(); } else { 
log_debug << "accepted socket is connecting"; } log_debug << "handshake sent"; } void gcomm::GMCast::gmcast_connect(const std::string& remote_addr) { if (remote_addr == listen_addr_) return; gu::URI connect_uri(remote_addr); set_tcp_defaults (&connect_uri); if (!bind_ip_.empty()) { connect_uri.set_option(gcomm::Socket::OptIfAddr, bind_ip_); } SocketPtr tp = pnet().socket(connect_uri); try { tp->connect(connect_uri); } catch (gu::Exception& e) { log_debug << "Connect failed: " << e.what(); // delete tp; return; } Proto* peer = new Proto ( *this, version_, tp, listener_->listen_addr(), remote_addr, mcast_addr_, segment_, group_name_); std::pair ret = proto_map_->insert(std::make_pair(tp->id(), peer)); if (ret.second == false) { delete peer; gu_throw_fatal << "Failed to add peer to map"; } ret.first->second->wait_handshake(); } void gcomm::GMCast::gmcast_forget(const UUID& uuid, const gu::datetime::Period& wait_period) { /* Close all proto entries corresponding to uuid */ ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; Proto* rp = ProtoMap::value(pi); if (rp->remote_uuid() == uuid) { erase_proto(pi); } } /* Set all corresponding entries in address list to have retry cnt * greater than max retries and next reconnect time after some period */ AddrList::iterator ai; for (ai = remote_addrs_.begin(); ai != remote_addrs_.end(); ++ai) { AddrEntry& ae(AddrList::value(ai)); if (ae.uuid() == uuid) { log_info << "forgetting " << uuid << " (" << AddrList::key(ai) << ")"; ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; if (ProtoMap::value(pi)->remote_addr() == AddrList::key(ai)) { log_info << "deleting entry " << AddrList::key(ai); erase_proto(pi); } } disable_reconnect(*ai); gu::datetime::Date now(gu::datetime::Date::monotonic()); // Don't reduce next reconnect time if it is set greater than // requested if ((now + 
wait_period > ae.next_reconnect()) || (ae.next_reconnect() == gu::datetime::Date::max())) { ae.set_next_reconnect(gu::datetime::Date::monotonic() + wait_period); } else { log_debug << "not decreasing next reconnect for " << uuid; } } } /* Update state */ update_addresses(); } void gcomm::GMCast::handle_connected(Proto* rp) { const SocketPtr tp(rp->socket()); assert(tp->state() == Socket::S_CONNECTED); log_debug << "transport " << tp << " connected"; if (rp->state() == Proto::S_INIT) { log_debug << "sending handshake"; // accepted socket was waiting for underlying transport // handshake to finish rp->send_handshake(); } } void gcomm::GMCast::handle_established(Proto* est) { log_info << self_string() << " connection established to " << est->remote_uuid() << " " << est->remote_addr(); // UUID checks are handled during protocol handshake assert(est->remote_uuid() != uuid()); if (is_evicted(est->remote_uuid())) { log_warn << "Closing connection to evicted node " << est->remote_uuid(); erase_proto(proto_map_->find_checked(est->socket()->id())); update_addresses(); return; } // If address is found from pending_addrs_, move it to remote_addrs list // and set retry cnt to -1 const std::string& remote_addr(est->remote_addr()); AddrList::iterator i(pending_addrs_.find(remote_addr)); if (i != pending_addrs_.end()) { log_debug << "Erasing " << remote_addr << " from panding list"; pending_addrs_.erase(i); } if ((i = remote_addrs_.find(remote_addr)) == remote_addrs_.end()) { log_debug << "Inserting " << remote_addr << " to remote list"; insert_address (remote_addr, est->remote_uuid(), remote_addrs_); i = remote_addrs_.find(remote_addr); } else if (AddrList::value(i).uuid() != est->remote_uuid()) { log_info << "remote endpoint " << est->remote_addr() << " changed identity " << AddrList::value(i).uuid().full_str() << " -> " << est->remote_uuid().full_str(); remote_addrs_.erase(i); i = remote_addrs_.insert_unique( make_pair(est->remote_addr(), 
AddrEntry(gu::datetime::Date::monotonic(), gu::datetime::Date::monotonic(), est->remote_uuid()))); } if (AddrList::value(i).retry_cnt() > AddrList::value(i).max_retries()) { log_info << "discarding connection " << est->remote_uuid() << " (" << est->remote_addr() << ") " << "after " << AddrList::value(i).retry_cnt() << " retries"; erase_proto(proto_map_->find(est->socket()->id())); update_addresses(); return; } enable_reconnect(*i); // Cleanup all previously established entries with same // remote uuid. It is assumed that the most recent connection // is usually the healthiest one. ProtoMap::iterator j, j_next; for (j = proto_map_->begin(); j != proto_map_->end(); j = j_next) { j_next = j, ++j_next; Proto* p(ProtoMap::value(j)); if (p->remote_uuid() == est->remote_uuid()) { if (p->handshake_uuid() < est->handshake_uuid()) { log_debug << self_string() << " cleaning up duplicate " << p->socket() << " after established " << est->socket(); erase_proto(j); } else if (p->handshake_uuid() > est->handshake_uuid()) { log_debug << self_string() << " cleaning up established " << est->socket() << " which is duplicate of " << p->socket(); erase_proto(proto_map_->find_checked(est->socket()->id())); update_addresses(); return; } else { assert(p == est); } } } AddrList::iterator ali(find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(est->remote_uuid()))); if (ali != remote_addrs_.end()) { AddrList::value(ali).set_last_connect(); } else { log_warn << "peer " << est->remote_addr() << " not found from remote addresses"; } update_addresses(); } void gcomm::GMCast::handle_failed(Proto* failed) { log_debug << "handle failed: " << *failed; const std::string& remote_addr = failed->remote_addr(); bool found_ok(false); for (ProtoMap::const_iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* p(ProtoMap::value(i)); if (p != failed && p->state() <= Proto::S_OK && p->remote_addr() == failed->remote_addr()) { log_debug << "found live " << *p; found_ok = true; 
break; } } if (found_ok == false && remote_addr != "") { AddrList::iterator i; if ((i = pending_addrs_.find(remote_addr)) != pending_addrs_.end() || (i = remote_addrs_.find(remote_addr)) != remote_addrs_.end()) { AddrEntry& ae(AddrList::value(i)); ae.set_retry_cnt(ae.retry_cnt() + 1); gu::datetime::Date rtime = gu::datetime::Date::monotonic() + gu::datetime::Period("PT1S"); log_debug << self_string() << " setting next reconnect time to " << rtime << " for " << remote_addr; ae.set_next_reconnect(rtime); } } erase_proto(proto_map_->find_checked(failed->socket()->id())); update_addresses(); } bool gcomm::GMCast::is_connected(const std::string& addr, const UUID& uuid) const { for (ProtoMap::const_iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* conn = ProtoMap::value(i); if (addr == conn->remote_addr() || uuid == conn->remote_uuid()) { return true; } } return false; } void gcomm::GMCast::insert_address (const std::string& addr, const UUID& uuid, AddrList& alist) { if (addr == listen_addr_) { gu_throw_fatal << "Trying to add self addr " << addr << " to addr list"; } if (alist.insert(make_pair(addr, AddrEntry(gu::datetime::Date::monotonic(), gu::datetime::Date::monotonic(), uuid))).second == false) { log_warn << "Duplicate entry: " << addr; } else { log_debug << self_string() << ": new address entry " << uuid << ' ' << addr; } } void gcomm::GMCast::update_addresses() { LinkMap link_map; std::set uuids; /* Add all established connections into uuid_map and update * list of remote addresses */ ProtoMap::iterator i, i_next; for (i = proto_map_->begin(); i != proto_map_->end(); i = i_next) { i_next = i, ++i_next; Proto* rp = ProtoMap::value(i); if (rp->state() == Proto::S_OK) { if (rp->remote_addr() == "" || rp->remote_uuid() == UUID::nil()) { gu_throw_fatal << "Protocol error: local: (" << my_uuid_ << ", '" << listen_addr_ << "'), remote: (" << rp->remote_uuid() << ", '" << rp->remote_addr() << "')"; } if (remote_addrs_.find(rp->remote_addr()) == 
remote_addrs_.end()) { log_warn << "Connection exists but no addr on addr list for " << rp->remote_addr(); insert_address(rp->remote_addr(), rp->remote_uuid(), remote_addrs_); } if (uuids.insert(rp->remote_uuid()).second == false) { // Duplicate entry, drop this one // @todo Deeper inspection about the connection states log_debug << self_string() << " dropping duplicate entry"; erase_proto(i); } else { link_map.insert(Link(rp->remote_uuid(), rp->remote_addr(), rp->mcast_addr())); } } } /* Send topology change message containing only established * connections */ for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* gp = ProtoMap::value(i); // @todo: a lot of stuff here is done for each connection, including // message creation and serialization. Need a mcast_msg() call // and move this loop in there. if (gp->state() == Proto::S_OK) gp->send_topology_change(link_map); } /* Add entries reported by all other nodes to address list to * get complete view of existing uuids/addresses */ for (ProtoMap::iterator i = proto_map_->begin(); i != proto_map_->end(); ++i) { Proto* rp = ProtoMap::value(i); if (rp->state() == Proto::S_OK) { for (LinkMap::const_iterator j = rp->link_map().begin(); j != rp->link_map().end(); ++j) { const UUID& link_uuid(LinkMap::key(j)); const std::string& link_addr(LinkMap::value(j).addr()); gcomm_assert(link_uuid != UUID::nil() && link_addr != ""); if (addr_blacklist_.find(link_addr) != addr_blacklist_.end()) { log_debug << self_string() << " address '" << link_addr << "' pointing to uuid " << link_uuid << " is blacklisted, skipping"; continue; } if (link_uuid != uuid() && remote_addrs_.find(link_addr) == remote_addrs_.end() && pending_addrs_.find(link_addr) == pending_addrs_.end()) { log_debug << self_string() << " conn refers to but no addr in addr list for " << link_addr; insert_address(link_addr, link_uuid, remote_addrs_); AddrList::iterator pi(remote_addrs_.find(link_addr)); assert(pi != remote_addrs_.end()); 
AddrEntry& ae(AddrList::value(pi)); enable_reconnect(*pi); // Add some randomness for first reconnect to avoid // simultaneous connects gu::datetime::Date rtime(gu::datetime::Date::monotonic()); rtime = rtime + ::rand() % (100*gu::datetime::MSec); ae.set_next_reconnect(rtime); next_check_ = std::min(next_check_, rtime); } } } } // Build multicast tree log_debug << self_string() << " --- mcast tree begin ---"; segment_map_.clear(); Segment& local_segment(segment_map_[segment_]); if (mcast_) { log_debug << mcast_addr_; local_segment.push_back(RelayEntry(0, mcast_.get())); } self_index_ = 0; for (ProtoMap::const_iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { Proto* p(ProtoMap::value(i)); log_debug << "Proto: " << *p; if (p->remote_segment() == segment_) { if (p->state() == Proto::S_OK && (p->mcast_addr() == "" || p->mcast_addr() != mcast_addr_)) { local_segment.push_back(RelayEntry(p, p->socket().get())); if (p->remote_uuid() < uuid()) { ++self_index_; } } } else { if (p->state() == Proto::S_OK) { Segment& remote_segment(segment_map_[p->remote_segment()]); remote_segment.push_back(RelayEntry(p, p->socket().get())); } } } log_debug << self_string() << " self index: " << self_index_; log_debug << self_string() << " --- mcast tree end ---"; } void gcomm::GMCast::reconnect() { if (is_isolated(isolate_)) { log_debug << "skipping reconnect due to isolation"; return; } /* Loop over known remote addresses and connect if proto entry * does not exist */ gu::datetime::Date now = gu::datetime::Date::monotonic(); AddrList::iterator i, i_next; for (i = pending_addrs_.begin(); i != pending_addrs_.end(); i = i_next) { i_next = i, ++i_next; const std::string& pending_addr(AddrList::key(i)); const AddrEntry& ae(AddrList::value(i)); if (is_connected (pending_addr, UUID::nil()) == false && ae.next_reconnect() <= now) { if (ae.retry_cnt() > ae.max_retries()) { log_info << "cleaning up pending addr " << pending_addr; pending_addrs_.erase(i); continue; // no reference to 
pending_addr after this } else if (ae.next_reconnect() <= now) { log_debug << "connecting to pending " << pending_addr; gmcast_connect (pending_addr); } } } for (i = remote_addrs_.begin(); i != remote_addrs_.end(); i = i_next) { i_next = i, ++i_next; const std::string& remote_addr(AddrList::key(i)); const AddrEntry& ae(AddrList::value(i)); const UUID& remote_uuid(ae.uuid()); gcomm_assert(remote_uuid != uuid()); if (is_connected(remote_addr, remote_uuid) == false && ae.next_reconnect() <= now) { if (ae.retry_cnt() > ae.max_retries()) { log_info << " cleaning up " << remote_uuid << " (" << remote_addr << ")"; remote_addrs_.erase(i); continue;//no reference to remote_addr or remote_uuid after this } else if (ae.next_reconnect() <= now) { if (ae.retry_cnt() % 30 == 0) { log_info << self_string() << " reconnecting to " << remote_uuid << " (" << remote_addr << "), attempt " << ae.retry_cnt(); } gmcast_connect(remote_addr); } else { // } } } } void gcomm::GMCast::disable_reconnect(AddrList::value_type& entry) { log_debug << "Disabling reconnect for " << entry.first; entry.second.set_max_retries(0); entry.second.set_retry_cnt(1); } void gcomm::GMCast::enable_reconnect(AddrList::value_type& entry) { if (entry.second.retry_cnt() == -1) { return; } log_debug << "Enabling reconnect for " << entry.first; /* Initialize retry cnt to -1 in order to avoid unnecessary logging * at the first connect attempt. * Initial reconnect attempts is limited to terminate reconnection * attempts early if the peer becomes unresponsive. The max retries * is adjusted into higher value in handle_stable_view() once it is * known that the peer has become part of the stable view. 
*/ entry.second.set_retry_cnt(-1); entry.second.set_max_retries(max_initial_reconnect_attempts_); } namespace { class CmpUuidCounts { public: CmpUuidCounts(const std::set& uuids) : uuids_(uuids) { } size_t count(const gcomm::gmcast::Proto* p) const { size_t cnt(0); for (std::set::const_iterator i(uuids_.begin()); i != uuids_.end(); ++i) { for (gcomm::gmcast::LinkMap::const_iterator lm_i(p->link_map().begin()); lm_i != p->link_map().end(); ++lm_i) { if (lm_i->uuid() == *i) { ++cnt; break; } } } return cnt; } bool operator()(const gcomm::gmcast::Proto* a, const gcomm::gmcast::Proto* b) const { const size_t ac = count(a); const size_t bc = count(b); return (ac < bc); } private: const std::set& uuids_; }; } gcomm::GMCast::RelaySet gcomm::GMCast::compute_relay_set(const std::set& proto_set, std::set& nonlive_uuids, uint8_t segment) { std::set relay_set; /* Primary set: nodes in the same segment that are not in the * nonlive_uuids set. */ std::set primary_set; std::copy_if(proto_set.begin(), proto_set.end(), std::inserter(primary_set, primary_set.end()), [segment, &nonlive_uuids](Proto* p) { return p->remote_segment() == segment && nonlive_uuids.count(p->remote_uuid()) == 0; }); populate_relay_set(nonlive_uuids, primary_set, relay_set); if (not nonlive_uuids.empty()) { /* Secondary set: nodes in other segments that are not in the * nonlive_uuids set. 
*/ std::set secondary_set; std::copy_if(proto_set.begin(), proto_set.end(), std::inserter(secondary_set, secondary_set.end()), [segment, &nonlive_uuids](Proto* p) { return p->remote_segment() != segment && nonlive_uuids.count(p->remote_uuid()) == 0; }); populate_relay_set(nonlive_uuids, secondary_set, relay_set); } return relay_set; } void gcomm::GMCast::populate_relay_set( std::set& nonlive_uuids, std::set& lookup_set, gcomm::GMCast::RelaySet& relay_set) { while (nonlive_uuids.empty() == false && lookup_set.empty() == false) { const auto maxel = std::max_element(lookup_set.begin(), lookup_set.end(), CmpUuidCounts(nonlive_uuids)); Proto* p = *maxel; log_debug << "relay set maxel :" << *p << " count: " << CmpUuidCounts(nonlive_uuids).count(p); bool link_found = false; const LinkMap& lm = p->link_map(); /* Check if any of the links provide reachability to a node in * the nonlive_uuids set. */ for (const auto& link : lm) { if (nonlive_uuids.erase(link.uuid()) > 0) { link_found = true; } } /* Only add link if it provides reachability to a node in the * nonlive_uuids set. */ if (link_found) { relay_set.insert(RelayEntry(p, p->socket().get())); lookup_set.erase(maxel); } else { /* As links were not found, max_element must have returned a * link with zero connections to nonlive_uuids. Therefore * there are no candidates left to improve the reachability * and the loop can be terminated. */ assert(CmpUuidCounts(nonlive_uuids).count(p) == 0); break; } } } void gcomm::GMCast::check_liveness() { std::set live_uuids; // iterate over proto map and mark all timed out entries as failed gu::datetime::Date now(gu::datetime::Date::monotonic()); for (ProtoMap::iterator i(proto_map_->begin()); i != proto_map_->end(); ) { // Store next iterator into temporary, handle_failed() may remove // the entry proto_map_. 
ProtoMap::iterator i_next(i); ++i_next; Proto* p(ProtoMap::value(i)); if (p->state() > Proto::S_INIT && p->state() < Proto::S_FAILED && p->recv_tstamp() + peer_timeout_ < now) { gcomm::SocketStats stats(p->socket()->stats()); log_info << self_string() << " connection to peer " << p->remote_uuid() << " with addr " << p->remote_addr() << " timed out, no messages seen in " << peer_timeout_ << ", socket stats: " << stats; p->set_state(Proto::S_FAILED); handle_failed(p); } else if (p->state() == Proto::S_OK) { gcomm::SocketStats stats(p->socket()->stats()); if (stats.send_queue_length >= 1024) { log_debug << self_string() << " socket send queue to " << " peer " << p->remote_uuid() << " with addr " << p->remote_addr() << ", socket stats: " << stats; } if ((p->recv_tstamp() + peer_timeout_*2/3 < now) || (p->send_tstamp() + peer_timeout_*1/3 < now)) { p->send_keepalive(); } if (p->state() == Proto::S_FAILED) { handle_failed(p); } else { live_uuids.insert(p->remote_uuid()); } } i = i_next; } bool should_relay(false); // iterate over addr list and check if there is at least one live // proto entry associated to each addr entry std::set nonlive_uuids; std::string nonlive_peers; for (AddrList::const_iterator i(remote_addrs_.begin()); i != remote_addrs_.end(); ++i) { const AddrEntry& ae(AddrList::value(i)); if (ae.retry_cnt() <= ae.max_retries() && live_uuids.find(ae.uuid()) == live_uuids.end()) { log_debug << self_string() << " missing live proto entry for " << ae.uuid(); nonlive_uuids.insert(ae.uuid()); nonlive_peers += AddrList::key(i) + " "; should_relay = true; } else if (ae.last_connect() + peer_timeout_ > now) { log_debug << "continuing relaying to " << i->first << " for " << (ae.last_connect() + peer_timeout_ - now); should_relay = true; } } if (should_relay == true) { if (relaying_ == false) { if (not nonlive_uuids.empty()) { log_info << self_string() << " turning message relay requesting on, nonlive peers: " << nonlive_peers; } relaying_ = true; } relay_set_.clear(); 
// build set of protos having OK status std::set proto_set; for (ProtoMap::iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { Proto* p(ProtoMap::value(i)); if (p->state() == Proto::S_OK) { proto_set.insert(p); } } relay_set_ = compute_relay_set(proto_set, nonlive_uuids, segment_); } else if (relaying_ == true && should_relay == false) { log_info << self_string() << " turning message relay requesting off"; relay_set_.clear(); relaying_ = false; } } gu::datetime::Date gcomm::GMCast::handle_timers() { const gu::datetime::Date now(gu::datetime::Date::monotonic()); if (now >= next_check_) { check_liveness(); reconnect(); next_check_ = now + check_period_; } return next_check_; } void gcomm::GMCast::send(const RelayEntry& re, int segment, gcomm::Datagram& dg) { int err; if ((err = re.socket->send(segment, dg)) != 0) { log_debug << "failed to send to " << re.socket->remote_addr() << ": (" << err << ") " << strerror(err); } else if (re.proto) { re.proto->set_send_tstamp(gu::datetime::Date::monotonic()); } } void gcomm::GMCast::relay(const Message& msg, const Datagram& dg, const void* exclude_id) { Datagram relay_dg(dg); relay_dg.normalize(); Message relay_msg(msg); // reset all relay flags from message to be relayed relay_msg.set_flags(relay_msg.flags() & ~(Message::F_RELAY | Message::F_SEGMENT_RELAY)); // if F_RELAY is set in received message, relay to all peers except // the originator if (msg.flags() & Message::F_RELAY) { gu_trace(push_header(relay_msg, relay_dg)); for (SegmentMap::iterator segment_i(segment_map_.begin()); segment_i != segment_map_.end(); ++segment_i) { Segment& segment(segment_i->second); for (Segment::iterator target_i(segment.begin()); target_i != segment.end(); ++target_i) { if ((*target_i).socket->id() != exclude_id) { send(*target_i, msg.segment_id(), relay_dg); } } } } else if (msg.flags() & Message::F_SEGMENT_RELAY) { if (relay_set_.empty() == false) { // send message to all nodes in relay set to reach // nodes in local segment that 
are not directly reachable relay_msg.set_flags(relay_msg.flags() | Message::F_RELAY); gu_trace(push_header(relay_msg, relay_dg)); for (RelaySet::iterator relay_i(relay_set_.begin()); relay_i != relay_set_.end(); ++relay_i) { if ((*relay_i).socket->id() != exclude_id) { send(*relay_i, msg.segment_id(), relay_dg); } } gu_trace(pop_header(relay_msg, relay_dg)); relay_msg.set_flags(relay_msg.flags() & ~Message::F_RELAY); } if (msg.segment_id() == segment_) { log_warn << "message with F_SEGMENT_RELAY from own segment, " << "source " << msg.source_uuid(); } // Relay to local segment gu_trace(push_header(relay_msg, relay_dg)); Segment& segment(segment_map_[segment_]); for (Segment::iterator i(segment.begin()); i != segment.end(); ++i) { send(*i, msg.segment_id(), relay_dg); } } else { log_warn << "GMCast::relay() called without relay flags set"; } } void gcomm::GMCast::handle_up(const void* id, const Datagram& dg, const ProtoUpMeta& um) { ProtoMap::iterator i; if (listener_ == 0) { return; } if (id == listener_->id()) { gmcast_accept(); } else if (mcast_ && id == mcast_->id()) { Message msg; try { if (dg.offset() < dg.header_len()) { gu_trace(msg.unserialize(dg.header(), dg.header_size(), dg.header_offset() + dg.offset())); } else { gu_trace(msg.unserialize(dg.payload().data(), dg.len(), dg.offset())); } } catch (gu::Exception& e) { GU_TRACE(e); log_warn << e.what(); return; } if (msg.type() >= Message::GMCAST_T_USER_BASE) { gu_trace(send_up(Datagram(dg, dg.offset() + msg.serial_size()), ProtoUpMeta(msg.source_uuid()))); } else { log_warn << "non-user message " << msg.type() << " from multicast socket"; } } else if ((i = proto_map_->find(id)) != proto_map_->end()) { Proto* p(ProtoMap::value(i)); if (dg.len() > 0) { const Proto::State prev_state(p->state()); if (prev_state == Proto::S_FAILED) { log_warn << "unhandled failed proto"; handle_failed(p); return; } Message msg; try { msg.unserialize(dg.payload().data(), dg.len(), dg.offset()); } catch (gu::Exception& e) { 
GU_TRACE(e); log_warn << e.what(); p->set_state(Proto::S_FAILED); handle_failed(p); return; } if (msg.type() >= Message::GMCAST_T_USER_BASE) { if (evict_list().empty() == false && evict_list().find(msg.source_uuid()) != evict_list().end()) { return; } if (msg.flags() & (Message::F_RELAY | Message::F_SEGMENT_RELAY)) { relay(msg, Datagram(dg, dg.offset() + msg.serial_size()), id); } p->set_recv_tstamp(gu::datetime::Date::monotonic()); send_up(Datagram(dg, dg.offset() + msg.serial_size()), ProtoUpMeta(msg.source_uuid())); return; } else { try { p->set_recv_tstamp(gu::datetime::Date::monotonic()); gu_trace(p->handle_message(msg)); } catch (const gu::Exception& e) { handle_failed(p); if (e.get_errno() == ENOTRECOVERABLE) { throw; } log_warn << "handling gmcast protocol message failed: " << e.what(); return; } if (p->state() == Proto::S_FAILED) { handle_failed(p); return; } else if (p->check_changed_and_reset() == true) { update_addresses(); check_liveness(); reconnect(); } } if (prev_state != Proto::S_OK && p->state() == Proto::S_OK) { handle_established(p); } } else if (p->socket()->state() == Socket::S_CONNECTED && (p->state() == Proto::S_HANDSHAKE_WAIT || p->state() == Proto::S_INIT)) { handle_connected(p); } else if (p->socket()->state() == Socket::S_CONNECTED) { log_warn << "connection " << p->socket()->id() << " closed by peer"; p->set_state(Proto::S_FAILED); handle_failed(p); } else { log_debug << "socket in state " << p->socket()->state(); p->set_state(Proto::S_FAILED); handle_failed(p); } } else { // log_info << "proto entry " << id << " not found"; } } static gcomm::gmcast::Proto* find_by_remote_uuid( const gcomm::gmcast::ProtoMap& proto_map, const gcomm::UUID& uuid) { for (gcomm::gmcast::ProtoMap::const_iterator i(proto_map.begin()); i != proto_map.end(); ++i) { if (i->second->remote_uuid() == uuid) { return i->second; } } return 0; } int gcomm::GMCast::handle_down(Datagram& dg, const ProtoDownMeta& dm) { Message msg(version_, Message::GMCAST_T_USER_BASE, 
uuid(), 1, segment_); // If target is set and proto entry for target is found, // send a direct message. Otherwise fall back for broadcast // to ensure message delivery via relay if (dm.target() != UUID::nil()) { Proto* target_proto(find_by_remote_uuid(*proto_map_, dm.target())); if (target_proto && target_proto->state() == Proto::S_OK) { gu_trace(push_header(msg, dg)); int err; if ((err = target_proto->socket()->send(msg.segment_id(), dg)) != 0) { log_debug << "failed to send to " << target_proto->socket()->remote_addr() << ": (" << err << ") " << strerror(err); } else { target_proto->set_send_tstamp(gu::datetime::Date::monotonic()); } gu_trace(pop_header(msg, dg)); if (err == 0) { return 0; } // In case of error fall back to broadcast } else { log_debug << "Target " << dm.target() << " proto not found"; } } // handle relay set first, skip these peers below if (relay_set_.empty() == false) { msg.set_flags(msg.flags() | Message::F_RELAY); gu_trace(push_header(msg, dg)); for (RelaySet::iterator ri(relay_set_.begin()); ri != relay_set_.end(); ++ri) { send(*ri, msg.segment_id(), dg); } gu_trace(pop_header(msg, dg)); msg.set_flags(msg.flags() & ~Message::F_RELAY); } for (SegmentMap::iterator si(segment_map_.begin()); si != segment_map_.end(); ++si) { uint8_t segment_id(si->first); Segment& segment(si->second); if (segment_id != segment_) { size_t target_idx((self_index_ + segment_id) % segment.size()); msg.set_flags(msg.flags() | Message::F_SEGMENT_RELAY); // skip peers that are in relay set if (relay_set_.empty() == true || relay_set_.find(segment[target_idx]) == relay_set_.end()) { gu_trace(push_header(msg, dg)); send(segment[target_idx], msg.segment_id(), dg); gu_trace(pop_header(msg, dg)); } } else { msg.set_flags(msg.flags() & ~Message::F_SEGMENT_RELAY); gu_trace(push_header(msg, dg)); for (Segment::iterator i(segment.begin()); i != segment.end(); ++i) { // skip peers that are in relay set if (relay_set_.empty() == true || relay_set_.find(*i) == relay_set_.end()) 
{ send(*i, msg.segment_id(), dg); } } gu_trace(pop_header(msg, dg)); } } return 0; } void gcomm::GMCast::handle_stable_view(const View& view) { log_debug << "GMCast::handle_stable_view: " << view; if (view.type() == V_PRIM) { // discard addr list entries not in view std::set gmcast_lst; for (AddrList::const_iterator i(remote_addrs_.begin()); i != remote_addrs_.end(); ++i) { gmcast_lst.insert(i->second.uuid()); } std::set view_lst; for (NodeList::const_iterator i(view.members().begin()); i != view.members().end(); ++i) { view_lst.insert(i->first); } std::list diff; std::set_difference(gmcast_lst.begin(), gmcast_lst.end(), view_lst.begin(), view_lst.end(), std::back_inserter(diff)); // Forget partitioned entries, allow them to reconnect // in time_wait_/2. Left nodes are given time_wait_ ban for // reconnecting when handling V_REG below. for (std::list::const_iterator i(diff.begin()); i != diff.end(); ++i) { gmcast_forget(*i, time_wait_/2); } // mark nodes in view as stable for (std::set::const_iterator i(view_lst.begin()); i != view_lst.end(); ++i) { AddrList::iterator ai; if ((ai = find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(*i))) != remote_addrs_.end()) { ai->second.set_retry_cnt(-1); ai->second.set_max_retries(max_retry_cnt_); } } // iterate over pending address list and discard entries without UUID for (AddrList::iterator i(pending_addrs_.begin()); i != pending_addrs_.end(); ) { AddrList::iterator i_next(i); ++i_next; const AddrEntry& ae(AddrList::value(i)); if (ae.uuid() == UUID()) { const std::string addr(AddrList::key(i)); log_info << "discarding pending addr without UUID: " << addr; for (ProtoMap::iterator pi(proto_map_->begin()); pi != proto_map_->end();) { ProtoMap::iterator pi_next(pi); ++pi_next; Proto* p(ProtoMap::value(pi)); if (p->remote_addr() == addr) { log_info << "discarding pending addr proto entry " << p; erase_proto(pi); } pi = pi_next; } pending_addrs_.erase(i); } i = i_next; } prim_view_reached_ = true; } else if 
(view.type() == V_REG) { for (NodeList::const_iterator i(view.members().begin()); i != view.members().end(); ++i) { AddrList::iterator ai; if ((ai = find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(NodeList::key(i)))) != remote_addrs_.end()) { log_info << "declaring " << NodeList::key(i) << " at " << handle_get_address(NodeList::key(i)) << " stable"; ai->second.set_retry_cnt(-1); ai->second.set_max_retries(max_retry_cnt_); } } // Forget left nodes for (NodeList::const_iterator i(view.left().begin()); i != view.left().end(); ++i) { gmcast_forget(NodeList::key(i), time_wait_); } } check_liveness(); for (ProtoMap::const_iterator i(proto_map_->begin()); i != proto_map_->end(); ++i) { log_debug << "proto: " << *ProtoMap::value(i); } } void gcomm::GMCast::handle_allow_connect(const UUID& uuid) { auto it = std::find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(uuid)); if (it != remote_addrs_.end()) { enable_reconnect(*it); } } void gcomm::GMCast::handle_evict(const UUID& uuid) { if (is_evicted(uuid) == true) { return; } gmcast_forget(uuid, time_wait_); } std::string gcomm::GMCast::handle_get_address(const UUID& uuid) const { AddrList::const_iterator ali( find_if(remote_addrs_.begin(), remote_addrs_.end(), AddrListUUIDCmp(uuid))); return (ali == remote_addrs_.end() ? 
"" : AddrList::key(ali)); } void gcomm::GMCast::add_or_del_addr(const std::string& val) { if (val.compare(0, 4, "add:") == 0) { gu::URI uri(val.substr(4)); std::string addr(gu::net::resolve(uri_string(get_scheme(use_ssl_, dynamic_socket_), uri.get_host(), uri.get_port())).to_string()); log_info << "inserting address '" << addr << "'"; insert_address(addr, UUID(), remote_addrs_); AddrList::iterator ai(remote_addrs_.find(addr)); enable_reconnect(*ai); } else if (val.compare(0, 4, "del:") == 0) { std::string addr(val.substr(4)); AddrList::iterator ai(remote_addrs_.find(addr)); if (ai != remote_addrs_.end()) { ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; Proto* rp = ProtoMap::value(pi); if (rp->remote_addr() == AddrList::key(ai)) { log_info << "deleting entry " << AddrList::key(ai); erase_proto(pi); } } AddrEntry& ae(AddrList::value(ai)); disable_reconnect(*ai); ae.set_next_reconnect(gu::datetime::Date::monotonic() + time_wait_); update_addresses(); } else { log_info << "address '" << addr << "' not found from remote addrs list"; } } else { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } } bool gcomm::GMCast::set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb) { try { if (key == Conf::GMCastMaxInitialReconnectAttempts) { max_initial_reconnect_attempts_ = gu::from_string(val); return true; } else if (key == Conf::GMCastPeerAddr) { try { add_or_del_addr(val); } catch (gu::NotFound& nf) { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } catch (gu::NotSet& ns) { gu_throw_error(EINVAL) << "invalid addr spec '" << val << "'"; } return true; } else if (key == Conf::GMCastIsolate) { int tmpval = gu::from_string(val); if (tmpval < 0 || tmpval > 2) { gu_throw_error(EINVAL) << "invalid value for gmacst.isolate: '" << tmpval << "'"; } isolate_ = tmpval; log_info << "turning isolation " << (isolate_ == 1 ? 
"on" : (isolate_ == 2 ? "force quit" : "off")); if (isolate_) { // delete all entries in proto map ProtoMap::iterator pi, pi_next; for (pi = proto_map_->begin(); pi != proto_map_->end(); pi = pi_next) { pi_next = pi, ++pi_next; erase_proto(pi); } segment_map_.clear(); } return true; } else if (key == Conf::SocketRecvBufSize) { gu_trace(Conf::check_recv_buf_size(val)); conf_.set(key, val); for (ProtoMap::iterator pi(proto_map_->begin()); pi != proto_map_->end(); ++pi) { gu_trace(pi->second->socket()->set_option(key, val)); // erase_proto(pi++); } // segment_map_.clear(); // reconnect(); return true; } else if (key == Conf::GMCastGroup || key == Conf::GMCastListenAddr || key == Conf::GMCastMCastAddr || key == Conf::GMCastMCastPort || key == Conf::GMCastMCastTTL || key == Conf::GMCastTimeWait || key == Conf::GMCastPeerTimeout || key == Conf::GMCastSegment) { gu_throw_error(EPERM) << "can't change value during runtime"; } } catch (gu::Exception& e) { GU_TRACE(e); throw; } catch (std::exception& e) { gu_throw_error(EINVAL) << e.what(); } catch (...) 
{ gu_throw_error(EINVAL) << "exception"; } return false; } galera-4-26.4.25/gcomm/src/protocol_version.hpp000644 000164 177776 00000000317 15107057155 022525 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2014 Codership Oy */ #ifndef GCOMM_PROTOCOL_VERSION_HPP #define GCOMM_PROTOCOL_VERSION_HPP #define GCOMM_PROTOCOL_MAX_VERSION 1 #endif // GCOMM_PROTOCOL_VERSION_HPP galera-4-26.4.25/gcomm/src/gmcast_proto.cpp000644 000164 177776 00000032750 15107057155 021621 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2025 Codership Oy */ #include "gmcast_proto.hpp" #include "gmcast.hpp" #include "gu_uri.hpp" #include "gu_event_service.hpp" static void emit_evicted_event() { std::ostringstream os; os << "{\"status\": \"evicted\", " << "\"message\": " << "\"This node was evicted permanently from cluster, " << "restart is required\"}"; gu::EventService::callback("event", os.str()); } static const std::string gmcast_proto_err_evicted("evicted"); static const std::string gmcast_proto_err_invalid_group("invalid group"); static const std::string gmcast_proto_err_duplicate_uuid("duplicate uuid"); const gcomm::UUID& gcomm::gmcast::Proto::local_uuid() const { return context_.node_uuid(); } std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const Proto& p) { os << "v=" << p.version_ << "," << "hu=" << p.handshake_uuid_ << "," << "lu=" << p.context_.node_uuid() << "," << "ru=" << p.remote_uuid_ << "," << "ls=" << static_cast(p.local_segment_) << "," << "rs=" << static_cast(p.remote_segment_) << "," << "la=" << p.local_addr_ << "," << "ra=" << p.remote_addr_ << "," << "mc=" << p.mcast_addr_ << "," << "gn=" << p.group_name_ << "," << "ch=" << p.changed_ << "," << "st=" << gcomm::gmcast::Proto::to_string(p.state_) << "," << "pr=" << p.propagate_remote_ << "," << "tp=" << p.tp_ << "," << "rts=" << p.recv_tstamp_ << "," << "sts=" << p.send_tstamp_; return os; } void gcomm::gmcast::Proto:: set_state(State new_state) { static const bool allowed[][7] = { // INIT 
HS_SENT HS_WAIT HSR_SENT OK FAILED CLOSED { false, true, true, false, false, true, false },// INIT { false, false, false, false, true, true, false },// HS_SENT { false, false, false, true, false, true, false },// HS_WAIT { false, false, false, false, true, true, false },// HSR_SENT { false, false, false, false, true, true, true },// OK { false, false, false, false, false, true, true },// FAILED { false, false, false, false, false, false, false } // CLOSED }; if (!allowed[state_][new_state]) { /* TODO: Make this a warning and make the proto failed in * release build. */ gu_throw_fatal << "Invalid state change: " << to_string(state_) << " -> " << to_string(new_state); } log_debug << *this << " from state: " << to_string(state_) << " to state: " << to_string(new_state); state_ = new_state; } void gcomm::gmcast::Proto::send_msg(const Message& msg, bool ignore_no_buffer_space) { gu::Buffer buf; gu_trace(serialize(msg, buf)); Datagram dg(buf); int ret = tp_->send(msg.segment_id(), dg); if (ret != 0) { if (not (ret == ENOBUFS && ignore_no_buffer_space)) { log_debug << "Send failed: " << strerror(ret); set_state(S_FAILED); } } } void gcomm::gmcast::Proto::send_handshake() { handshake_uuid_ = UUID(0, 0); Message hs (version_, Message::GMCAST_T_HANDSHAKE, handshake_uuid_, context_.node_uuid(), local_segment_); send_msg(hs, false); set_state(S_HANDSHAKE_SENT); } void gcomm::gmcast::Proto::wait_handshake() { if (state() != S_INIT) gu_throw_fatal << "Invalid state: " << to_string(state()); set_state(S_HANDSHAKE_WAIT); } bool gcomm::gmcast::Proto::validate_handshake_uuid() { // // Sanity checks for duplicate UUIDs. // // 1) Check if the other endpoint exists on this node. If so, // the address will be blacklisted and this connection terminated. // 2) Check if the remote endpoint has same UUID and abort if // this node has not reached prim view. 
This deals with the case where // this node is connected to the node with same UUID and this node // has not reached primary component yet. // 3) This node is connected to an another node which has an // UUID which already exists in the cluster with different // address. This may happen if // - The other node has restarted fast and regenerated a new UUID // which conflicts with existing UUID // - The other node changed its address // In this case we send an evict message and rely on the other // node to take correct action (abort if it was joining, retry // if its address changed). // if (context_.is_own(this)) { // Connecting to own address should not get past the first // handshake message so we should see here only S_HANDSHAKE_WAIT // state. assert(state() == S_HANDSHAKE_WAIT); log_info << context_.self_string() << " Found matching local endpoint for a connection, " << "blacklisting address " << remote_addr(); context_.blacklist(this); set_state(S_FAILED); return false; } else if (context_.node_uuid() == remote_uuid() && context_.prim_view_reached() == false) { // Direct connection to node with the same UUID, the duplicate // UUID should be handled when the first handshake message // is seen, so we should see here only S_HANDSHAKE_WAIT state. assert(state() == S_HANDSHAKE_WAIT); // Remove gvwstate.dat, otherwise the same UUID will be // used again when the node is restarted. context_.remove_viewstate_file(); set_state(S_FAILED); gu_throw_fatal << "A node with the same UUID already exists in the cluster. 
" << "Removing gvwstate.dat file, this node will generate a new " << "UUID when restarted."; } else if (context_.is_not_own_and_duplicate_exists(this)) { evict_duplicate_uuid(); // Sets state to failed return false; } return true; } void gcomm::gmcast::Proto::handle_handshake(const Message& hs) { if (state() != S_HANDSHAKE_WAIT) gu_throw_fatal << "Invalid state: " << to_string(state()); if (hs.version() != version_) { log_warn << "incompatible protocol version: " << hs.version(); set_state(S_FAILED); return; } handshake_uuid_ = hs.handshake_uuid(); remote_uuid_ = hs.source_uuid(); remote_segment_ = hs.segment_id(); if (validate_handshake_uuid() == false) { assert(state() == S_FAILED); // Should be adjusted by validate return; } Message hsr (version_, Message::GMCAST_T_HANDSHAKE_RESPONSE, handshake_uuid_, context_.node_uuid(), local_addr_, group_name_, local_segment_); send_msg(hsr, false); set_state(S_HANDSHAKE_RESPONSE_SENT); } void gcomm::gmcast::Proto::handle_handshake_response(const Message& hs) { if (state() != S_HANDSHAKE_SENT) gu_throw_fatal << "Invalid state: " << to_string(state()); const std::string& grp = hs.group_name(); try { if (grp != group_name_) { log_info << "handshake failed, my group: '" << group_name_ << "', peer group: '" << grp << "'"; Message failed(version_, Message::GMCAST_T_FAIL, context_.node_uuid(), local_segment_, gmcast_proto_err_invalid_group); send_msg(failed, false); set_state(S_FAILED); return; } remote_uuid_ = hs.source_uuid(); remote_segment_ = hs.segment_id(); gu::URI remote_uri(tp_->remote_addr()); remote_addr_ = uri_string(remote_uri.get_scheme(), remote_uri.get_host(), gu::URI(hs.node_address()).get_port()); if (context_.is_proto_evicted(this) == true) { log_info << "peer " << remote_uuid_ << " from " << remote_addr_ << " has been evicted out, rejecting connection"; evict(); return; } if (validate_handshake_uuid() == false) { assert(state() == S_FAILED); // Should be adjusted by validate return; } propagate_remote_ = true; 
Message ok(version_, Message::GMCAST_T_OK, context_.node_uuid(), local_segment_, ""); send_msg(ok, false); set_state(S_OK); } catch (std::exception& e) { log_warn << "Parsing peer address '" << hs.node_address() << "' failed: " << e.what(); Message nok (version_, Message::GMCAST_T_FAIL, context_.node_uuid(), local_segment_, "invalid node address"); send_msg (nok, false); set_state(S_FAILED); } } void gcomm::gmcast::Proto::handle_ok(const Message& hs) { if (state_ == S_OK) { log_debug << "handshake ok: " << *this; } propagate_remote_ = true; set_state(S_OK); } void gcomm::gmcast::Proto::handle_failed(const Message& hs) { log_debug << "handshake with " << remote_uuid_ << " " << remote_addr_ << " failed: '" << hs.error() << "'"; set_state(S_FAILED); if (hs.error() == gmcast_proto_err_evicted) { // otherwise node use the uuid in view state file. // which is probably still in other nodes evict list. context_.remove_viewstate_file(); emit_evicted_event(); gu_throw_fatal << "this node has been evicted out of the cluster, " << "gcomm backend restart is required"; } else if (hs.error() == gmcast_proto_err_duplicate_uuid) { if (context_.prim_view_reached()) { log_info << "Received duplicate UUID error from other node " << "while in primary component. This may mean that " << "this node's IP address has changed. Will close " << "connection and keep on retrying"; } else { // Remove gvwstate.dat, otherwise the same UUID will be // used again when the node is restarted. context_.remove_viewstate_file(); gu_throw_fatal << "A node with the same UUID already exists in the cluster. 
" << "Removing gvwstate.dat file, this node will generate a new " << "UUID when restarted."; } } } void gcomm::gmcast::Proto::handle_topology_change(const Message& msg) { const Message::NodeList& nl(msg.node_list()); LinkMap new_map; for (Message::NodeList::const_iterator i = nl.begin(); i != nl.end(); ++i) { new_map.insert(Link(Message::NodeList::key(i), Message::NodeList::value(i).addr(), Message::NodeList::value(i).mcast_addr())); if (Message::NodeList::key(i) == remote_uuid() && mcast_addr_ == "" && Message::NodeList::value(i).mcast_addr() != "") { mcast_addr_ = Message::NodeList::value(i).mcast_addr(); } } using std::rel_ops::operator!=; if (link_map_ != new_map) { changed_ = true; } link_map_ = new_map; } void gcomm::gmcast::Proto::handle_keepalive(const Message& msg) { log_debug << "keepalive: " << *this; Message ok(version_, Message::GMCAST_T_OK, context_.node_uuid(), local_segment_, ""); send_msg(ok, true); } void gcomm::gmcast::Proto::send_topology_change(LinkMap& um) { Message::NodeList nl; for (LinkMap::const_iterator i = um.begin(); i != um.end(); ++i) { if (LinkMap::key(i) == UUID::nil() || LinkMap::value(i).addr() == "") gu_throw_fatal << "nil uuid or empty address"; nl.insert_unique( std::make_pair(LinkMap::key(i), Node(LinkMap::value(i).addr()))); } Message msg(version_, Message::GMCAST_T_TOPOLOGY_CHANGE, context_.node_uuid(), group_name_, nl); send_msg(msg, false); } void gcomm::gmcast::Proto::send_keepalive() { log_debug << "sending keepalive: " << *this; Message msg(version_, Message::GMCAST_T_KEEPALIVE, context_.node_uuid(), local_segment_, ""); send_msg(msg, true); } void gcomm::gmcast::Proto::evict() { Message failed(version_, Message::GMCAST_T_FAIL, context_.node_uuid(), local_segment_, gmcast_proto_err_evicted); send_msg(failed, false); set_state(S_FAILED); } void gcomm::gmcast::Proto::evict_duplicate_uuid() { Message failed(version_, Message::GMCAST_T_FAIL, context_.node_uuid(), local_segment_, gmcast_proto_err_duplicate_uuid); 
send_msg(failed, false); set_state(S_FAILED); } void gcomm::gmcast::Proto::handle_message(const Message& msg) { switch (msg.type()) { case Message::GMCAST_T_HANDSHAKE: handle_handshake(msg); break; case Message::GMCAST_T_HANDSHAKE_RESPONSE: handle_handshake_response(msg); break; case Message::GMCAST_T_OK: handle_ok(msg); break; case Message::GMCAST_T_FAIL: handle_failed(msg); break; case Message::GMCAST_T_TOPOLOGY_CHANGE: handle_topology_change(msg); break; case Message::GMCAST_T_KEEPALIVE: handle_keepalive(msg); break; default: gu_throw_fatal << "invalid message type: " << msg.type(); } } galera-4-26.4.25/gcomm/src/asio_tcp.cpp000644 000164 177776 00000055676 15107057155 020735 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2012-2024 Codership Oy */ #include "asio_tcp.hpp" #include "gcomm/util.hpp" #include "gcomm/common.hpp" #define FAILED_HANDLER(_e) failed_handler(_e, __FUNCTION__, __LINE__) // Helpers to set socket buffer sizes for both connecting // and listening sockets. static bool asio_recv_buf_warned(false); template void set_recv_buf_size_helper(const gu::Config& conf, Socket& socket) { if (conf.get(gcomm::Conf::SocketRecvBufSize) != GCOMM_ASIO_AUTO_BUF_SIZE) { size_t const recv_buf_size (conf.get(gcomm::Conf::SocketRecvBufSize)); // this should have been checked already assert(ssize_t(recv_buf_size) >= 0); socket->set_receive_buffer_size(recv_buf_size); size_t cur_value(socket->get_receive_buffer_size()); log_debug << "socket recv buf size " << cur_value; if (cur_value < recv_buf_size && not asio_recv_buf_warned) { log_warn << "Receive buffer size " << cur_value << " less than requested " << recv_buf_size << ", this may affect performance in high latency/high " << "throughput networks."; asio_recv_buf_warned = true; } } } static bool asio_send_buf_warned(false); template void set_send_buf_size_helper(const gu::Config& conf, Socket& socket) { if (conf.get(gcomm::Conf::SocketSendBufSize) != GCOMM_ASIO_AUTO_BUF_SIZE) { size_t const send_buf_size 
(conf.get(gcomm::Conf::SocketSendBufSize)); // this should have been checked already assert(ssize_t(send_buf_size) >= 0); socket->set_send_buffer_size(send_buf_size); size_t cur_value(socket->get_send_buffer_size()); log_debug << "socket send buf size " << cur_value; if (cur_value < send_buf_size && not asio_send_buf_warned) { log_warn << "Send buffer size " << cur_value << " less than requested " << send_buf_size << ", this may affect performance in high latency/high " << "throughput networks."; asio_send_buf_warned = true; } } } gcomm::AsioTcpSocket::AsioTcpSocket(AsioProtonet& net, const gu::URI& uri) : Socket (uri), net_ (net), socket_ (net.io_service_.make_socket(uri)), send_q_ (), last_queued_tstamp_(), recv_buf_ (net_.mtu() + NetHeader::serial_size_), recv_offset_ (0), last_delivered_tstamp_(), state_ (S_CLOSED), deferred_close_timer_() { log_debug << "ctor for " << id() << " uri " << uri; } gcomm::AsioTcpSocket::AsioTcpSocket(AsioProtonet& net, const gu::URI& uri, const std::shared_ptr& socket) : Socket (uri), net_ (net), socket_ (socket), send_q_ (), last_queued_tstamp_(), recv_buf_ (net_.mtu() + NetHeader::serial_size_), recv_offset_ (0), last_delivered_tstamp_(), state_ (S_CLOSED), deferred_close_timer_() { log_debug << "ctor for " << id() << " uri " << uri; } gcomm::AsioTcpSocket::~AsioTcpSocket() { log_debug << "dtor for " << id() << " state " << state_ << " send q size " << send_q_.size(); if (state_ != S_CLOSED) { socket_->close(); } } void gcomm::AsioTcpSocket::failed_handler(const gu::AsioErrorCode& ec, const std::string& func, int line) { log_debug << "failed handler from " << func << ":" << line << " socket " << id() << " error " << ec << " " << socket_->is_open() << " state " << state(); try { log_debug << "local endpoint " << local_addr() << " remote endpoint " << remote_addr(); } catch (...) 
{ } const State prev_state(state()); if (state() != S_CLOSED) { state_ = S_FAILED; } if (prev_state != S_FAILED && prev_state != S_CLOSED) { net_.dispatch(id(), Datagram(), ProtoUpMeta(ec.value())); } } void gcomm::AsioTcpSocket::connect_handler(gu::AsioSocket& socket, const gu::AsioErrorCode& ec) { Critical crit(net_); try { if (ec) { log_info << "Failed to establish connection: " << ec; FAILED_HANDLER(ec); return; } else { state_ = S_CONNECTED; init_tstamps(); net_.dispatch(id(), Datagram(), ProtoUpMeta(ec.value())); async_receive(); } } catch (const gu::Exception& e) { FAILED_HANDLER(gu::AsioErrorCode(e.get_errno())); } } void gcomm::AsioTcpSocket::connect(const gu::URI& uri) { try { Critical crit(net_); socket_->open(uri); set_buf_sizes(); // Must be done before connect const std::string bind_ip(uri.get_option(gcomm::Socket::OptIfAddr, "")); if (not bind_ip.empty()) { socket_->bind(gu::make_address(bind_ip)); } socket_->async_connect(uri, shared_from_this()); state_ = S_CONNECTING; } catch (const gu::Exception& e) { std::ostringstream msg; msg << "error while connecting to remote host " << uri.to_string() << "', asio error '" << e.what() << "'"; log_warn << msg.str(); gu_throw_error(e.get_errno()) << msg.str(); } } #include "gu_disable_non_virtual_dtor.hpp" // Helper class to keep the socket open for writing remaining messages // after gcomm::AsioTcpSocket::close() has been called. // The socket is kept open until all queued messages have been written // or timeout occurs. This is achieved by storing shared pointer // of the socket into timer object. 
class gcomm::AsioTcpSocket::DeferredCloseTimer : public gu::AsioSteadyTimerHandler , public std::enable_shared_from_this { public: DeferredCloseTimer(gu::AsioIoService& io_service, const std::shared_ptr& socket) : socket_(socket) , io_service_(io_service) , timer_(io_service_) { } ~DeferredCloseTimer() { log_debug << "Deferred close timer destruct"; } void start() { timer_.expires_from_now(std::chrono::seconds(5)); timer_.async_wait(shared_from_this()); log_debug << "Deferred close timer started for socket with " << "remote endpoint: " << socket_->remote_addr(); } void cancel() { log_debug << "Deferred close timer cancel " << socket_->socket_; timer_.cancel(); } virtual void handle_wait(const gu::AsioErrorCode& ec) GALERA_OVERRIDE { log_debug << "Deferred close timer handle_wait " << ec << " for " << socket_->socket_; socket_->close(); socket_.reset(); } private: std::shared_ptr socket_; gu::AsioIoService& io_service_; gu::AsioSteadyTimer timer_; }; #include "gu_enable_non_virtual_dtor.hpp" void gcomm::AsioTcpSocket::close() { Critical crit(net_); if (state() == S_CLOSED || state() == S_CLOSING) return; log_debug << "closing " << id() << " socket " << socket_ << " state " << state() << " send_q size " << send_q_.size(); if (state() == S_CONNECTED) { state_ = S_CLOSING; auto timer = std::make_shared( net_.io_service_, shared_from_this()); deferred_close_timer_ = timer; timer->start(); /* Shut down if there are no more messages to send. The actual closing * of socket happens when either read or write handler gets called with * error. 
*/ if (send_q_.empty()) { socket_->shutdown(); } } else { state_ = S_CLOSED; socket_->close(); } } // Enable to introduce random errors for write handler // #define GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR void gcomm::AsioTcpSocket::write_handler(gu::AsioSocket& socket, const gu::AsioErrorCode& ec, size_t bytes_transferred) { #ifdef GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR static const long empty_rate(10000); static const long bytes_transferred_less_than_rate(10000); static const long bytes_transferred_not_zero_rate(10000); #endif // GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR Critical crit(net_); if (state() != S_CONNECTED && state() != S_CLOSING) { log_debug << "write handler for " << id() << " state " << state(); if (ec && not gu::is_verbose_error(ec)) { log_warn << "write_handler(): " << ec.message() << " (" << gu::extra_error_info(ec) << ")"; } return; } log_debug << "gcomm::AsioTcpSocket::write_handler() ec " << ec << " socket " << socket_ << " send_q " << send_q_.size(); if (!ec) { if (send_q_.empty() == true #ifdef GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR || ::rand() % empty_rate == 0 #endif // GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR ) { log_warn << "write_handler() called with empty send_q_. " << "Transport may not be reliable, closing the socket"; FAILED_HANDLER(gu::AsioErrorCode(EPROTO)); } else if (send_q_.front().len() < bytes_transferred #ifdef GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR || ::rand() % bytes_transferred_less_than_rate == 0 #endif // GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR ) { log_warn << "write_handler() bytes_transferred " << bytes_transferred << " less than sent " << send_q_.front().len() << ". 
Transport may not be reliable, closing the socket"; FAILED_HANDLER(gu::AsioErrorCode(EPROTO)); } else { while (send_q_.empty() == false && bytes_transferred >= send_q_.front().len()) { const Datagram& dg(send_q_.front()); bytes_transferred -= dg.len(); send_q_.pop_front(); } log_debug << "AsioTcpSocket::write_handler() after queue purge " << socket_ << " send_q " << send_q_.size(); if (bytes_transferred != 0 #ifdef GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR || ::rand() % bytes_transferred_not_zero_rate == 0 #endif // GCOMM_ASIO_TCP_SIMULATE_WRITE_HANDLER_ERROR ) { log_warn << "write_handler() bytes_transferred " << bytes_transferred << " after processing the send_q_. " << "Transport may not be reliable, closing the socket"; FAILED_HANDLER(gu::AsioErrorCode(EPROTO)); } else if (send_q_.empty() == false) { const Datagram& dg(send_q_.front()); std::array cbs; cbs[0] = gu::AsioConstBuffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[1] = gu::AsioConstBuffer(dg.payload().data(), dg.payload().size()); socket_->async_write(cbs, shared_from_this()); } else if (state_ == S_CLOSING) { log_debug << "deferred close of " << id(); become_closed(); } } } else if (state_ == S_CLOSING) { log_debug << "deferred close of " << id() << " error " << ec; become_closed(); } else { FAILED_HANDLER(ec); } } void gcomm::AsioTcpSocket::set_option(const std::string& key, const std::string& val) { // Currently adjustable socket.recv_buf_size and socket.send_buf_size // bust be set before the connection is established, so the runtime // setting will not be effective. 
log_warn << "Setting " << key << " in run time does not have effect, " << "please set the configuration in provider options " << "and restart"; } namespace gcomm { class AsioPostForSendHandler { public: AsioPostForSendHandler(const std::shared_ptr& socket) : socket_(socket) { } void operator()() { log_debug << "AsioPostForSendHandler " << socket_->socket_; Critical crit(socket_->net_); // Send queue is processed also in closing state // in order to deliver as many messages as possible, // even if the socket has been discarded by // upper layers. if ((socket_->state() == gcomm::Socket::S_CONNECTED || socket_->state() == gcomm::Socket::S_CLOSING) && socket_->send_q_.empty() == false) { const gcomm::Datagram& dg(socket_->send_q_.front()); std::array cbs; cbs[0] = gu::AsioConstBuffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[1] = gu::AsioConstBuffer(dg.payload().data(), dg.payload().size()); socket_->socket_->async_write(cbs, socket_); } } private: std::shared_ptr socket_; }; } int gcomm::AsioTcpSocket::send(int segment, const Datagram& dg) { Critical crit(net_); log_debug << "AsioTcpSocket::send() socket " << socket_ << " state " << state_ << " send_q " << send_q_.size(); if (state() != S_CONNECTED) { return ENOTCONN; } if (send_q_.size() >= max_send_q_bytes) { return ENOBUFS; } NetHeader hdr(static_cast(dg.len()), net_.version_); if (net_.checksum_ != NetHeader::CS_NONE) { hdr.set_crc32(crc32(net_.checksum_, dg), net_.checksum_); } last_queued_tstamp_ = gu::datetime::Date::monotonic(); // Make copy of datagram to be able to adjust the header Datagram priv_dg(dg); priv_dg.set_header_offset(priv_dg.header_offset() - NetHeader::serial_size_); serialize(hdr, priv_dg.header(), priv_dg.header_size(), priv_dg.header_offset()); send_q_.push_back(segment, priv_dg); if (send_q_.size() == 1) { net_.io_service_.post(AsioPostForSendHandler(shared_from_this())); } return 0; } void gcomm::AsioTcpSocket::read_handler(gu::AsioSocket& socket, const gu::AsioErrorCode& ec, 
const size_t bytes_transferred) { Critical crit(net_); if (ec) { if (not gu::is_verbose_error(ec)) { log_warn << "read_handler(): " << ec.message() << " (" << gu::extra_error_info(ec) << ")"; } if (state() == S_CLOSING) { log_debug << "read handler for " << id() << " closing"; become_closed(); } FAILED_HANDLER(ec); return; } if (state() != S_CONNECTED && state() != S_CLOSING) { log_debug << "read handler for " << id() << " state " << state(); return; } recv_offset_ += bytes_transferred; while (recv_offset_ >= NetHeader::serial_size_) { NetHeader hdr; try { unserialize(&recv_buf_[0], recv_buf_.size(), 0, hdr); } catch (gu::Exception& e) { FAILED_HANDLER(gu::AsioErrorCode(e.get_errno())); return; } if (recv_offset_ >= hdr.len() + NetHeader::serial_size_) { Datagram dg( gu::SharedBuffer( new gu::Buffer(&recv_buf_[0] + NetHeader::serial_size_, &recv_buf_[0] + NetHeader::serial_size_ + hdr.len()))); if (net_.checksum_ != NetHeader::CS_NONE) { #ifdef TEST_NET_CHECKSUM_ERROR long rnd(rand()); if (rnd % 10000 == 0) { hdr.set_crc32(net_.checksum_, static_cast(rnd)); } #endif /* TEST_NET_CHECKSUM_ERROR */ if (check_cs (hdr, dg)) { log_warn << "checksum failed, hdr: len=" << hdr.len() << " has_crc32=" << hdr.has_crc32() << " has_crc32c=" << hdr.has_crc32c() << " crc32=" << hdr.crc32(); FAILED_HANDLER(gu::AsioErrorCode(EPROTO)); return; } } ProtoUpMeta um; last_delivered_tstamp_ = gu::datetime::Date::monotonic(); net_.dispatch(id(), dg, um); recv_offset_ -= NetHeader::serial_size_ + hdr.len(); if (recv_offset_ > 0) { memmove(&recv_buf_[0], &recv_buf_[0] + NetHeader::serial_size_ + hdr.len(), recv_offset_); } } else { break; } } if (socket_->is_open()) { socket_->async_read(gu::AsioMutableBuffer( &recv_buf_[0] + recv_offset_, recv_buf_.size() - recv_offset_), shared_from_this()); } } size_t gcomm::AsioTcpSocket::read_completion_condition( gu::AsioSocket&, const gu::AsioErrorCode& ec, const size_t bytes_transferred) { Critical crit(net_); if (ec) { return 0; } if (state() != 
S_CONNECTED && state() != S_CLOSING) { log_debug << "read completion condition for " << id() << " state " << state(); return 0; } if (recv_offset_ + bytes_transferred >= NetHeader::serial_size_) { NetHeader hdr; try { unserialize(&recv_buf_[0], NetHeader::serial_size_, 0, hdr); } catch (const gu::Exception& e) { log_warn << "Failed to unserialize message. This may be a " << "result of corrupt message, port scanner or " << "another application connecting to " << "group communication port."; FAILED_HANDLER(gu::AsioErrorCode(e.get_errno())); return 0; } if (recv_offset_ + bytes_transferred >= NetHeader::serial_size_ + hdr.len()) { return 0; } } return (recv_buf_.size() - recv_offset_); } void gcomm::AsioTcpSocket::async_receive() { Critical crit(net_); gcomm_assert(state() == S_CONNECTED); socket_->async_read(gu::AsioMutableBuffer(&recv_buf_[0], recv_buf_.size()), shared_from_this()); } size_t gcomm::AsioTcpSocket::mtu() const { return net_.mtu(); } std::string gcomm::AsioTcpSocket::local_addr() const { return socket_->local_addr(); } std::string gcomm::AsioTcpSocket::remote_addr() const { return socket_->remote_addr(); } void gcomm::AsioTcpSocket::set_buf_sizes() { set_recv_buf_size_helper(net_.conf(), socket_); set_send_buf_size_helper(net_.conf(), socket_); } void gcomm::AsioTcpSocket::cancel_deferred_close_timer() { auto timer(deferred_close_timer_.lock()); if (timer) timer->cancel(); } void gcomm::AsioTcpSocket::become_closed() { socket_->close(); cancel_deferred_close_timer(); state_ = S_CLOSED; } gcomm::SocketStats gcomm::AsioTcpSocket::stats() const { SocketStats ret; try { auto tcpi(socket_->get_tcp_info()); ret.rtt = tcpi.tcpi_rtt; ret.rttvar = tcpi.tcpi_rttvar; ret.rto = tcpi.tcpi_rto; #if defined(__linux__) ret.lost = tcpi.tcpi_lost; #else ret.lost = 0; #endif /* __linux__ */ ret.last_data_recv = tcpi.tcpi_last_data_recv; ret.cwnd = tcpi.tcpi_snd_cwnd; gu::datetime::Date now(gu::datetime::Date::monotonic()); Critical crit(net_); ret.last_queued_since = 
(now - last_queued_tstamp_).get_nsecs(); ret.last_delivered_since = (now - last_delivered_tstamp_).get_nsecs(); ret.send_queue_length = send_q_.size(); ret.send_queue_bytes = send_q_.queued_bytes(); ret.send_queue_segments = send_q_.segments(); } catch (...) { } return ret; } gcomm::AsioTcpAcceptor::AsioTcpAcceptor(AsioProtonet& net, const gu::URI& uri) : Acceptor (uri), net_ (net), acceptor_ (net_.io_service_.make_acceptor(uri)), next_socket_() { } gcomm::AsioTcpAcceptor::~AsioTcpAcceptor() { close(); } void gcomm::AsioTcpAcceptor::accept_handler( gu::AsioAcceptor&, const std::shared_ptr& accepted_socket, const gu::AsioErrorCode& error) { if (!error) { next_socket_->socket_ = accepted_socket; /* Notify upper layer which then calls accept() to acquire ownership. */ net_.dispatch(id(), Datagram(), ProtoUpMeta(error.value())); assert(not next_socket_); } if (acceptor_->is_open()) { acceptor_->async_accept( shared_from_this(), next_socket_ = std::make_shared(net_, uri_, nullptr)); } } void gcomm::AsioTcpAcceptor::set_buf_sizes() { set_recv_buf_size_helper(net_.conf(), acceptor_); set_send_buf_size_helper(net_.conf(), acceptor_); } void gcomm::AsioTcpAcceptor::listen(const gu::URI& uri) { acceptor_->open(uri); set_buf_sizes(); // Must be done before listen acceptor_->listen(uri); next_socket_ = std::make_shared(net_, uri_, nullptr); acceptor_->async_accept(shared_from_this(), next_socket_); } std::string gcomm::AsioTcpAcceptor::listen_addr() const { return acceptor_->listen_addr(); } void gcomm::AsioTcpAcceptor::close() { acceptor_->close(); } gcomm::SocketPtr gcomm::AsioTcpAcceptor::accept() { /* Note that the socket is not flagged as connected yet, it may * still be in the middle of the handshake, e.g. if TLS is used. * Once the handshake is complete, connect_handler() will be called * which will notify the upper layer that the socket is ready. 
*/ auto ret = next_socket_; next_socket_ = nullptr; return ret; } galera-4-26.4.25/gcomm/src/asio_protonet.cpp000644 000164 177776 00000006252 15107057155 022003 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2019 Codership Oy */ #include "asio_tcp.hpp" #include "asio_udp.hpp" #include "asio_protonet.hpp" #include "socket.hpp" #include "gcomm/util.hpp" #include "gcomm/conf.hpp" #include "gu_logger.hpp" #include "gu_shared_ptr.hpp" #include #include gcomm::AsioProtonet::AsioProtonet(gu::Config& conf, int version) : gcomm::Protonet(conf, "asio", version), timer_expired_(false), mutex_(), poll_until_(gu::datetime::Date::max()), io_service_(conf), timer_handler_(std::make_shared(*this)), timer_(io_service_), mtu_(1 << 15), checksum_(NetHeader::checksum_type( conf.get(gcomm::Conf::SocketChecksum, NetHeader::CS_CRC32C))) { conf.set(gcomm::Conf::SocketChecksum, checksum_); } gcomm::AsioProtonet::~AsioProtonet() { } void gcomm::AsioProtonet::enter() { mutex_.lock(); } void gcomm::AsioProtonet::leave() { mutex_.unlock(); } gcomm::SocketPtr gcomm::AsioProtonet::socket(const gu::URI& uri) { if (uri.get_scheme() == "tcp" || uri.get_scheme() == "ssl") { return std::make_shared(*this, uri); } else if (uri.get_scheme() == "udp") { return std::make_shared(*this, uri); } else { gu_throw_fatal << "scheme '" << uri.get_scheme() << "' not implemented"; } } std::shared_ptr gcomm::AsioProtonet::acceptor( const gu::URI& uri) { return std::make_shared(*this, uri); } gu::datetime::Period handle_timers_helper(gcomm::Protonet& pnet, const gu::datetime::Period& period) { const gu::datetime::Date now(gu::datetime::Date::monotonic()); const gu::datetime::Date stop(now + period); const gu::datetime::Date next_time(pnet.handle_timers()); const gu::datetime::Period sleep_p(std::min(stop - now, next_time - now)); return (sleep_p < 0 ? 
0 : sleep_p); } size_t gcomm::AsioProtonet::event_loop(const gu::datetime::Period& period) { io_service_.reset(); poll_until_ = gu::datetime::Date::monotonic() + period; const gu::datetime::Period p(handle_timers_helper(*this, period)); // Use microsecond precision to avoid // "the resulting duration is not exactly representable" // static assertion with GCC 4.4. timer_expired_ = false; timer_.expires_from_now(std::chrono::microseconds(p.get_nsecs()/1000)); timer_.async_wait(timer_handler_); size_t count = io_service_.run(); timer_.cancel(); return timer_expired_ ? count - 1 : count; } void gcomm::AsioProtonet::dispatch(const SocketId& id, const Datagram& dg, const ProtoUpMeta& um) { for (std::deque::iterator i = protos_.begin(); i != protos_.end(); ++i) { (*i)->dispatch(id, dg, um); } } void gcomm::AsioProtonet::interrupt() { io_service_.stop(); } void gcomm::AsioProtonet::handle_wait(const gu::AsioErrorCode& ec) { if (ec) { return; } timer_expired_ = true; io_service_.stop(); } galera-4-26.4.25/gcomm/src/gmcast_proto.hpp000644 000164 177776 00000016301 15107057155 021620 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ #ifndef GCOMM_GMCAST_PROTO_HPP #define GCOMM_GMCAST_PROTO_HPP #include "gu_datetime.hpp" #include "gcomm/uuid.hpp" #include "gcomm/util.hpp" #include "socket.hpp" #include "gmcast_message.hpp" #include "gmcast_link.hpp" namespace gcomm { namespace gmcast { class ProtoContext; class Proto; class ProtoMap; std::ostream& operator<<(std::ostream& os, const Proto& p); } } /* Local node context for proto entries */ struct gcomm::gmcast::ProtoContext { virtual ~ProtoContext() = default; /* Return UUID of the local node */ virtual const gcomm::UUID& node_uuid() const = 0; /* Return true if the proto entry is owned by the local node */ virtual bool is_own(const Proto*) const = 0; /* Blacklist proto entry */ virtual void blacklist(const Proto*) = 0; /* Return true if the proto entry is not owned by the local node * and there 
already is a proto entry with the same remote UUID * but with different address. */ virtual bool is_not_own_and_duplicate_exists(const Proto*) const = 0; /* Return true if the proto entry is evicted */ virtual bool is_proto_evicted(const Proto*) const = 0; /* Return true if the primary view has been reached */ virtual bool prim_view_reached() const = 0; /* Remove viewstate file */ virtual void remove_viewstate_file() const = 0; /* Return string of the local node */ virtual std::string self_string() const = 0; }; class gcomm::gmcast::Proto { public: /* * | ----- connect ------> | * HANDSHAKE_WAIT | | --- * | | | accept() * | | <-- * | | HANDSHAKE_SENT * | <---- handshake ----- | * HANDSHAKE_RESPONSE_SENT | | * | -- handshake resp --> | * | | OK * | <------- ok --------- | * OK | | */ enum State { S_INIT, S_HANDSHAKE_SENT, S_HANDSHAKE_WAIT, S_HANDSHAKE_RESPONSE_SENT, S_OK, S_FAILED, S_CLOSED }; public: void set_state(State new_state); State state() const { return state_; } static std::string to_string (State s) { switch (s) { case S_INIT: return "INIT"; case S_HANDSHAKE_SENT: return "HANDSHAKE_SENT"; case S_HANDSHAKE_WAIT: return "HANDSHAKE_WAIT"; case S_HANDSHAKE_RESPONSE_SENT: return "HANDSHAKE_RESPONSE_SENT"; case S_OK: return "OK"; case S_FAILED: return "FAILED"; case S_CLOSED: return "CLOSED"; default: return "UNKNOWN"; } } Proto (ProtoContext& context, int version, SocketPtr tp, const std::string& local_addr, const std::string& remote_addr, const std::string& mcast_addr, uint8_t local_segment, const std::string& group_name) : version_ (version), handshake_uuid_ (), remote_uuid_ (), local_segment_ (local_segment), remote_segment_ (0), local_addr_ (local_addr), remote_addr_ (remote_addr), mcast_addr_ (mcast_addr), group_name_ (group_name), changed_ (false), state_ (S_INIT), propagate_remote_ (false), tp_ (tp), link_map_ (), send_tstamp_ (gu::datetime::Date::monotonic()), recv_tstamp_ (gu::datetime::Date::monotonic()), context_ (context) { } ~Proto() { 
tp_->close(); tp_ = nullptr; } void send_msg(const Message& msg, bool ignore_no_buffer_space); void send_handshake(); void wait_handshake(); /* * Validate handshake UUID. * * Validate UUID of the remote endpoint. * * @return False if UUID is found to be duplicate * of existing UUIDs and the remote endpoint cannot * be associated with any of the existing connections, * otherwise true is returned. * @throw Throws gu::Exception with ENOTRECOVERABLE errno if * the node should abort due to duplicate UUID. */ bool validate_handshake_uuid(); void handle_handshake(const Message& hs); void handle_handshake_response(const Message& hs); void handle_ok(const Message& hs); void handle_failed(const Message& hs); void handle_topology_change(const Message& msg); void handle_keepalive(const Message& msg); void send_topology_change(LinkMap& um); void handle_message(const Message& msg); void send_keepalive(); void evict(); /** * Send FAIL message to other endpoint with duplicate UUID * error status. */ void evict_duplicate_uuid(); const gcomm::UUID& handshake_uuid() const { return handshake_uuid_; } const gcomm::UUID& local_uuid() const; const gcomm::UUID& remote_uuid() const { return remote_uuid_; } uint8_t remote_segment() const { return remote_segment_; } SocketPtr socket() const { return tp_; } const std::string& remote_addr() const { return remote_addr_; } const std::string& mcast_addr() const { return mcast_addr_; } const LinkMap& link_map() const { return link_map_; } /** * Check if the internal state of the proto entry was changed * after the last call and reset the changed state to false. * * @return True if the state was changed after the last call, * otherwise false. 
*/ bool check_changed_and_reset() { bool ret = changed_; changed_ = false; return ret; } int version() const { return version_; } void set_recv_tstamp(gu::datetime::Date ts) { recv_tstamp_ = ts; } gu::datetime::Date recv_tstamp() const { return recv_tstamp_; } void set_send_tstamp(gu::datetime::Date ts) { send_tstamp_ = ts; } gu::datetime::Date send_tstamp() const { return send_tstamp_; } private: friend std::ostream& operator<<(std::ostream&, const Proto&); Proto(const Proto&); void operator=(const Proto&); int version_; gcomm::UUID handshake_uuid_; gcomm::UUID remote_uuid_; uint8_t local_segment_; uint8_t remote_segment_; std::string local_addr_; std::string remote_addr_; std::string mcast_addr_; std::string group_name_; bool changed_; State state_; bool propagate_remote_; SocketPtr tp_; LinkMap link_map_; gu::datetime::Date send_tstamp_; gu::datetime::Date recv_tstamp_; ProtoContext& context_; }; class gcomm::gmcast::ProtoMap : public Map { }; #endif // !GCOMM_GMCAST_PROTO_HPP galera-4-26.4.25/gcomm/src/pc_proto.hpp000644 000164 177776 00000024024 15107057155 020745 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_PC_PROTO_HPP #define GCOMM_PC_PROTO_HPP #include #include #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/conf.hpp" #include "pc_message.hpp" #include "defaults.hpp" #include "gu_uri.hpp" #include "gu_mutex.hpp" #include "gu_cond.hpp" namespace gcomm { namespace pc { class Proto; class ProtoBuilder; std::ostream& operator<<(std::ostream& os, const Proto& p); } } class gcomm::pc::Proto : public Protolay { public: enum State { S_CLOSED, S_STATES_EXCH, S_INSTALL, S_PRIM, S_TRANS, S_NON_PRIM, S_MAX }; static std::string to_string(const State s) { switch (s) { case S_CLOSED: return "CLOSED"; case S_STATES_EXCH: return "STATES_EXCH"; case S_INSTALL: return "INSTALL"; case S_TRANS: return "TRANS"; case S_PRIM: return "PRIM"; case S_NON_PRIM: return "NON_PRIM"; default: gu_throw_fatal << "Invalid 
state"; } } Proto(gu::Config& conf, const UUID& uuid, SegmentId segment, const gu::URI& uri = gu::URI("pc://"), View* rst_view = NULL) : Protolay(conf), my_uuid_ (uuid), start_prim_ (), npvo_ (param(conf, uri, Conf::PcNpvo, Defaults::PcNpvo)), ignore_quorum_ (param(conf, uri, Conf::PcIgnoreQuorum, Defaults::PcIgnoreQuorum)), ignore_sb_ (param(conf, uri, Conf::PcIgnoreSb, gu::to_string(ignore_quorum_))), closing_ (false), state_ (S_CLOSED), last_sent_seq_ (0), checksum_ (param(conf, uri, Conf::PcChecksum, Defaults::PcChecksum)), instances_ (), self_i_ (instances_.insert_unique(std::make_pair(uuid, Node()))), state_msgs_ (), current_view_ (0, V_NONE), pc_view_ (0, V_NON_PRIM), views_ (), mtu_ (std::numeric_limits::max()), weight_ (check_range(Conf::PcWeight, param(conf, uri, Conf::PcWeight, Defaults::PcWeight), 0, 0xff)), rst_view_ (), sync_param_mutex_ (), sync_param_cond_ (), param_sync_set_ (0) { set_weight(weight_); NodeMap::value(self_i_).set_segment(segment); if (rst_view) { set_restored_view(rst_view); } conf.set(Conf::PcNpvo, gu::to_string(npvo_)); conf.set(Conf::PcIgnoreQuorum, gu::to_string(ignore_quorum_)); conf.set(Conf::PcIgnoreSb, gu::to_string(ignore_sb_)); conf.set(Conf::PcChecksum, gu::to_string(checksum_)); conf.set(Conf::PcWeight, gu::to_string(weight_)); } ~Proto() { } const UUID& uuid() const { return my_uuid_; } bool prim() const { return NodeMap::value(self_i_).prim(); } void set_prim(const bool val) { NodeMap::value(self_i_).set_prim(val); } void mark_non_prim(); const ViewId& last_prim() const { return NodeMap::value(self_i_).last_prim(); } void set_last_prim(const ViewId& vid) { gcomm_assert(vid.type() == V_PRIM); NodeMap::value(self_i_).set_last_prim(vid); } uint32_t last_seq() const { return NodeMap::value(self_i_).last_seq(); } void set_last_seq(const uint32_t seq) { NodeMap::value(self_i_).set_last_seq(seq); } int64_t to_seq() const { return NodeMap::value(self_i_).to_seq(); } void set_to_seq(const int64_t seq) { 
NodeMap::value(self_i_).set_to_seq(seq); } void set_weight(int weight) { NodeMap::value(self_i_).set_weight(weight); } class SMMap : public Map { }; const View& current_view() const { return current_view_; } const UUID& self_id() const { return my_uuid_; } State state() const { return state_; } void shift_to (State); void send_state (); int send_install(bool bootstrap, int weight = -1); void handle_first_trans (const View&); void handle_trans (const View&); void handle_reg (const View&); void handle_msg (const Message&, const Datagram&, const ProtoUpMeta&); void handle_up (const void*, const Datagram&, const ProtoUpMeta&); int handle_down (Datagram&, const ProtoDownMeta&); void connect(bool first) { log_debug << self_id() << " start_prim " << first; start_prim_ = first; closing_ = false; shift_to(S_NON_PRIM); } void close(bool force = false) { closing_ = true; } void handle_view (const View&); bool set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb); void sync_param(); void set_mtu(size_t mtu) { mtu_ = mtu; } size_t mtu() const { return mtu_; } void set_restored_view(View* rst_view) { gcomm_assert(state_ == S_CLOSED); rst_view_ = rst_view; NodeMap::value(self_i_).set_last_prim( // set last prim just for exchanging uuid and seq. // but actually restored view is not actual prim view. 
ViewId(V_NON_PRIM, rst_view -> id().uuid(), rst_view -> id().seq())); } const View* restored_view() const { return rst_view_; } int cluster_weight() const; private: friend std::ostream& operator<<(std::ostream& os, const Proto& p); // Helper class to construct Proto states for unit tests friend class ProtoBuilder; Proto(gu::Config& conf, const UUID& uuid) : Protolay(conf), my_uuid_ (uuid), start_prim_ (), npvo_ (), ignore_quorum_ (), ignore_sb_ (), closing_ (), state_ (), last_sent_seq_ (), checksum_ (), instances_ (), self_i_ (), state_msgs_ (), current_view_ (0, V_NONE), pc_view_ (0, V_NON_PRIM), views_ (), mtu_ (std::numeric_limits::max()), weight_ (), rst_view_ (), sync_param_mutex_ (), sync_param_cond_ (), param_sync_set_ (0) { } Proto (const Proto&); Proto& operator=(const Proto&); bool requires_rtr() const; bool is_prim() const; bool have_quorum(const View&, const View&) const; bool have_split_brain(const View&) const; void validate_state_msgs() const; void cleanup_instances(); void handle_state(const Message&, const UUID&); void handle_install(const Message&, const UUID&); void handle_trans_install(const Message&, const UUID&); void handle_user(const Message&, const Datagram&, const ProtoUpMeta&); void deliver_view(bool bootstrap = false); UUID const my_uuid_; // Node uuid bool start_prim_; // Is allowed to start in prim comp bool npvo_; // Newer prim view overrides bool ignore_quorum_; // Ignore lack of quorum bool ignore_sb_; // Ignore split-brain condition bool closing_; // Protocol is in closing stage State state_; // State uint32_t last_sent_seq_; // Msg seqno of last sent message bool checksum_; // Enable message checksumming NodeMap instances_; // Map of known node instances NodeMap::iterator self_i_; // Iterator pointing to self node instance SMMap state_msgs_; // Map of received state messages View current_view_; // EVS view View pc_view_; // PC view std::list views_; // List of seen views size_t mtu_; // Maximum transmission unit int weight_; // 
Node weight in voting View* rst_view_; // restored PC view gu::Mutex sync_param_mutex_; gu::Cond sync_param_cond_; bool param_sync_set_; }; class gcomm::pc::ProtoBuilder { public: ProtoBuilder() : conf_() , uuid_() , state_msgs_() , current_view_() , pc_view_() , instances_() , state_(Proto::S_CLOSED) { } Proto* make_proto() { gcomm_assert(uuid_ != UUID::nil()); Proto* ret(new Proto(conf_, uuid_)); ret->state_msgs_ = state_msgs_; ret->current_view_ = current_view_; ret->pc_view_ = pc_view_; ret->instances_ = instances_; ret->self_i_ = ret->instances_.find_checked(uuid_); ret->state_ = state_; return ret; } ProtoBuilder& conf(const gu::Config& conf) { conf_ = conf; return *this; } ProtoBuilder& uuid(const gcomm::UUID& uuid) { uuid_ = uuid; return *this; } ProtoBuilder& state_msgs(const Proto::SMMap& state_msgs) { state_msgs_ = state_msgs; return *this; } ProtoBuilder& current_view(const gcomm::View& current_view) { current_view_ = current_view; return *this; } ProtoBuilder& pc_view(const gcomm::View& pc_view) { pc_view_ = pc_view; return *this; } ProtoBuilder& instances(const NodeMap& instances) { instances_ = instances; return *this; } ProtoBuilder& state(enum Proto::State state) { state_ = state; return *this; } private: gu::Config conf_; gcomm::UUID uuid_; Proto::SMMap state_msgs_; gcomm::View current_view_; gcomm::View pc_view_; NodeMap instances_; enum Proto::State state_; }; #endif // PC_PROTO_HPP galera-4-26.4.25/gcomm/src/evs_consensus.hpp000644 000164 177776 00000003611 15107057155 022014 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ #ifndef GCOMM_EVS_CONSENSUS_HPP #define GCOMM_EVS_CONSENSUS_HPP #include "evs_seqno.hpp" namespace gcomm { class UUID; class View; namespace evs { class NodeMap; class InputMap; class Message; class Consensus; class Proto; } } class gcomm::evs::Consensus { public: Consensus(const Proto& proto, const NodeMap& known, const InputMap& input_map, const View& current_view) : proto_ (proto), known_ 
(known), input_map_ (input_map), current_view_(current_view) { } /*! * Compare two messages if they are equal in consensus context. */ bool equal(const Message&, const Message&) const; /*! * Compute highest reachable safe seq from local state. * * @return Highest reachable safe seq. */ seqno_t highest_reachable_safe_seq() const; // input map safe seq but without considering // all suspected leaving nodes. seqno_t safe_seq_wo_all_susupected_leaving_nodes() const; /*! * Check if highest reachable safe seq according to message * consistent with local state. */ bool is_consistent_highest_reachable_safe_seq(const Message&) const; /*! * Check if message aru seq, safe seq and node ranges matches to * local state. */ bool is_consistent_input_map(const Message&) const; bool is_consistent_partitioning(const Message&) const; bool is_consistent_leaving(const Message&) const; bool is_consistent_same_view(const Message&) const; bool is_consistent(const Message&) const; bool is_consensus() const; private: const Proto& proto_; const NodeMap& known_; const InputMap& input_map_; const View& current_view_; }; #endif // GCOMM_EVS_CONSENSUS_HPP galera-4-26.4.25/gcomm/src/gmcast.hpp000644 000164 177776 00000027432 15107057155 020404 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ /* * Generic multicast transport. Uses tcp connections if real multicast * is not available. 
*/ #ifndef GCOMM_GMCAST_HPP #define GCOMM_GMCAST_HPP #include "gmcast_proto.hpp" #include "gcomm/uuid.hpp" #include "gcomm/exception.hpp" #include "gcomm/transport.hpp" #include "gcomm/types.hpp" #include #ifndef GCOMM_GMCAST_MAX_VERSION #define GCOMM_GMCAST_MAX_VERSION 0 #endif // GCOMM_GMCAST_MAX_VERSION namespace gcomm { namespace gmcast { class Proto; class Node; class Message; } class GMCast : public Transport, public gmcast::ProtoContext { public: GMCast (Protonet&, const gu::URI&, const UUID* my_uuid = NULL); ~GMCast(); // Protolay interface void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram&, const ProtoDownMeta&); void handle_stable_view(const View& view) override; void handle_allow_connect(const UUID& uuid) override; void handle_evict(const UUID& uuid) override; std::string handle_get_address(const UUID& uuid) const; bool set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb); // Transport interface const UUID& uuid() const { return my_uuid_; } SegmentId segment() const { return segment_; } void connect_precheck(bool start_prim); void connect(); void connect(const gu::URI&); void close(bool force = false); void close(const UUID& uuid) { gmcast_forget(uuid, time_wait_); } void listen() { gu_throw_fatal << "gmcast transport listen not implemented"; } // Configured listen address std::string configured_listen_addr() const { return listen_addr_; } // Listen adddress obtained from listening socket. 
std::string listen_addr() const { if (listener_ == 0) { gu_throw_error(ENOTCONN) << "not connected"; } return listener_->listen_addr(); } Transport* accept() { gu_throw_fatal << "gmcast transport accept not implemented"; } size_t mtu() const { return pnet_.mtu() - (4 + UUID::serial_size()); } private: GMCast (const GMCast&); GMCast& operator=(const GMCast&); static const long max_retry_cnt_; class AddrEntry { public: AddrEntry(const gu::datetime::Date& last_seen, const gu::datetime::Date& next_reconnect, const UUID& uuid) : uuid_ (uuid), last_seen_ (last_seen), next_reconnect_ (next_reconnect), last_connect_ (0), retry_cnt_ (0), max_retries_ (0) { } AddrEntry(const AddrEntry& other) : uuid_(other.uuid_), last_seen_(other.last_seen_), next_reconnect_(other.next_reconnect_), last_connect_(other.last_connect_), retry_cnt_(other.retry_cnt_), max_retries_(other.max_retries_) { } const UUID& uuid() const { return uuid_; } void set_last_seen(const gu::datetime::Date& d) { last_seen_ = d; } const gu::datetime::Date& last_seen() const { return last_seen_; } void set_next_reconnect(const gu::datetime::Date& d) { next_reconnect_ = d; } const gu::datetime::Date& next_reconnect() const { return next_reconnect_; } void set_last_connect() { last_connect_ = gu::datetime::Date::monotonic(); } const gu::datetime::Date& last_connect() const { return last_connect_; } void set_retry_cnt(const int r) { retry_cnt_ = r; } int retry_cnt() const { return retry_cnt_; } void set_max_retries(int mr) { max_retries_ = mr; } int max_retries() const { return max_retries_; } private: friend std::ostream& operator<<(std::ostream&, const AddrEntry&); void operator=(const AddrEntry&); UUID uuid_; gu::datetime::Date last_seen_; gu::datetime::Date next_reconnect_; gu::datetime::Date last_connect_; int retry_cnt_; int max_retries_; }; typedef Map AddrList; class AddrListUUIDCmp { public: AddrListUUIDCmp(const UUID& uuid) : uuid_(uuid) { } bool operator()(const AddrList::value_type& cmp) const { return 
(cmp.second.uuid() == uuid_); } private: UUID uuid_; }; int version_; static const int max_version_ = GCOMM_GMCAST_MAX_VERSION; uint8_t segment_; UUID my_uuid_; bool dynamic_socket_; bool use_ssl_; std::string group_name_; std::string listen_addr_; std::set initial_addrs_; std::string mcast_addr_; std::string bind_ip_; int mcast_ttl_; std::shared_ptr listener_; SocketPtr mcast_; AddrList pending_addrs_; AddrList remote_addrs_; AddrList addr_blacklist_; bool relaying_; int isolate_; bool prim_view_reached_; gmcast::ProtoMap* proto_map_; public: struct RelayEntry { gmcast::Proto* proto; gcomm::Socket* socket; RelayEntry(gmcast::Proto* p, gcomm::Socket* s) : proto(p), socket(s) { } bool operator<(const RelayEntry& other) const { return (socket < other.socket); } }; typedef std::set RelaySet; /* * Compute minimal set of proto entries required to reach * maximum set of nonlive peers. * * @param[in,out] proto_set Set of proto entries * @param[in,out] nonlive_uuids Set of nonlive peer UUIDs * @param segment Segment ID * * @return Minimal set of proto entries required to reach */ static RelaySet compute_relay_set(const std::set& proto_set, std::set& nonlive_uuids, uint8_t segment); private: static void populate_relay_set(std::set& nonlive_uuids, std::set& lookup_set, gcomm::GMCast::RelaySet& relay_set); RelaySet relay_set_; void send(const RelayEntry&, int segment, gcomm::Datagram& dg); typedef std::vector Segment; typedef std::map SegmentMap; SegmentMap segment_map_; // self index in local segment when ordered by UUID size_t self_index_; gu::datetime::Period time_wait_; gu::datetime::Period check_period_; gu::datetime::Period peer_timeout_; int max_initial_reconnect_attempts_; gu::datetime::Date next_check_; gu::datetime::Date handle_timers(); /* Begin of ProtoContext implementation */ /* Return UUID of the local node */ const gcomm::UUID& node_uuid() const override { return my_uuid_; } /* * Checks if the proto is a remote connection point for * locally originated 
connection. The proto * is required to have gone through initial handshake * sequence so that the remote endpoint UUID is known. * * @param proto Protocol entry * * @return True if matching entry was found and blacklisted, * false otherwise. */ bool is_own(const gmcast::Proto *proto) const override; /* * Add a proto entry to blacklist. After calling this reconnect * attempts to remote endpoint corresponding to proto are * disabled. * * @param proto Proto entry to be blacklisted. */ void blacklist(const gmcast::Proto* proto) override; /* * Check if the proto entry is not originated from own * connection and there already is a proto entry with * the same remote UUID but with different address. * * It is required that the proto has received handshake * message from remote endpoint so that the remote * endpoint identity is known. * */ bool is_not_own_and_duplicate_exists(const gmcast::Proto* proto) const override; bool is_proto_evicted(const gmcast::Proto* proto) const override { return is_evicted(proto->remote_uuid()); } /** * Return boolean denoting if the primary view has been reached. */ bool prim_view_reached() const override { return prim_view_reached_; } /* Remove viewstate file */ void remove_viewstate_file() const override { ViewState::remove_file(conf_); } /* Return string of the local node */ std::string self_string() const override { std::ostringstream os; os << '(' << my_uuid_ << ", '" << listen_addr_ << "')"; return os.str(); } /* End of ProtoContext implementation */ // Erase ProtoMap entry in a safe way so that all lookup lists // become properly updated. 
void erase_proto(gmcast::ProtoMap::iterator); // Accept new connection void gmcast_accept(); // Initialize connecting to remote host void gmcast_connect(const std::string&); // Forget node void gmcast_forget(const gcomm::UUID&, const gu::datetime::Period&); // Handle proto entry that has established connection to remote host void handle_connected(gmcast::Proto*); // Handle proto entry that has successfully finished handshake // sequence void handle_established(gmcast::Proto*); // Handle proto entry that has failed void handle_failed(gmcast::Proto*); // Check if there exists connection that matches to either // remote addr or uuid bool is_connected(const std::string& addr, const UUID& uuid) const; // Inset address to address list void insert_address(const std::string& addr, const UUID& uuid, AddrList&); // Scan through proto entries and update address lists void update_addresses(); // void check_liveness(); void relay(const gmcast::Message& msg, const Datagram& dg, const void* exclude_id); // Reconnecting void reconnect(); void disable_reconnect(AddrList::value_type&); void enable_reconnect(AddrList::value_type&); void set_initial_addr(const gu::URI&); void add_or_del_addr(const std::string&); friend std::ostream& operator<<(std::ostream&, const AddrEntry&); }; inline std::ostream& operator<<(std::ostream& os, const GMCast::AddrEntry& ae) { return (os << ae.uuid_ << " last_seen=" << ae.last_seen_ << " next_reconnect=" << ae.next_reconnect_ << " retry_cnt=" << ae.retry_cnt_); } } #endif // GCOMM_GMCAST_HPP galera-4-26.4.25/gcomm/src/gmcast_message.hpp000644 000164 177776 00000027014 15107057155 022104 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ #ifndef GCOMM_GMCAST_MESSAGE_HPP #define GCOMM_GMCAST_MESSAGE_HPP #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gmcast_node.hpp" #include "gcomm/map.hpp" namespace gcomm { namespace gmcast { class Message; } } class gcomm::gmcast::Message { public: enum Flags { F_GROUP_NAME 
= 1 << 0, F_NODE_NAME = 1 << 1, F_NODE_ADDRESS_OR_ERROR = 1 << 2, F_NODE_LIST = 1 << 3, F_HANDSHAKE_UUID = 1 << 4, // relay message to all peers in the same segment (excluding source) // and to all other segments except source segment F_RELAY = 1 << 5, // relay message to all peers in the same segment F_SEGMENT_RELAY = 1 << 6 }; enum Type { GMCAST_T_INVALID = 0, GMCAST_T_HANDSHAKE = 1, GMCAST_T_HANDSHAKE_RESPONSE = 2, GMCAST_T_OK = 3, GMCAST_T_FAIL = 4, GMCAST_T_TOPOLOGY_CHANGE = 5, GMCAST_T_KEEPALIVE = 6, /* Leave room for future use */ GMCAST_T_USER_BASE = 8, GMCAST_T_MAX = 255 }; class NodeList : public Map { }; private: gu::byte_t version_; Type type_; gu::byte_t flags_; gu::byte_t segment_id_; gcomm::UUID handshake_uuid_; gcomm::UUID source_uuid_; gcomm::String<64> node_address_or_error_; gcomm::String<32> group_name_; Message& operator=(const Message&); NodeList node_list_; public: static const char* type_to_string (Type t) { static const char* str[GMCAST_T_MAX] = { "INVALID", "HANDSHAKE", "HANDSHAKE_RESPONSE", "HANDSHAKE_OK", "HANDSHAKE_FAIL", "TOPOLOGY_CHANGE", "KEEPALIVE", "RESERVED_7", "USER_BASE" }; if (GMCAST_T_MAX > t) return str[t]; return "UNDEFINED PACKET TYPE"; } Message(const Message& msg) : version_ (msg.version_), type_ (msg.type_), flags_ (msg.flags_), segment_id_ (msg.segment_id_), handshake_uuid_ (msg.handshake_uuid_), source_uuid_ (msg.source_uuid_), node_address_or_error_ (msg.node_address_or_error_), group_name_ (msg.group_name_), node_list_ (msg.node_list_) { } /* Default ctor */ Message () : version_ (0), type_ (GMCAST_T_INVALID), flags_ (0), segment_id_ (0), handshake_uuid_ (), source_uuid_ (), node_address_or_error_ (), group_name_ (), node_list_ () {} /* Ctor for handshake */ Message (int version, const Type type, const UUID& handshake_uuid, const UUID& source_uuid, uint8_t segment_id) : version_ (version), type_ (type), flags_ (F_HANDSHAKE_UUID), segment_id_ (segment_id), handshake_uuid_ (handshake_uuid), source_uuid_ (source_uuid), 
node_address_or_error_ (), group_name_ (), node_list_ () { if (type_ != GMCAST_T_HANDSHAKE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in handshake constructor"; } /* ok, fail and keepalive */ Message (int version, const Type type, const UUID& source_uuid, uint8_t segment_id, const std::string& error) : version_ (version), type_ (type), flags_ (error.size() > 0 ? F_NODE_ADDRESS_OR_ERROR : 0), segment_id_ (segment_id), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (error), group_name_ (), node_list_ () { if (type_ != GMCAST_T_OK && type_ != GMCAST_T_FAIL && type_ != GMCAST_T_KEEPALIVE) { gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in ok/fail/keepalive constructor"; } } /* Ctor for user message */ Message (int version, const Type type, const UUID& source_uuid, const int ttl, uint8_t segment_id) : version_ (version), type_ (type), flags_ (0), segment_id_ (segment_id), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (), group_name_ (), node_list_ () { if (type_ < GMCAST_T_USER_BASE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in user message constructor"; } /* Ctor for handshake response */ Message (int version, const Type type, const gcomm::UUID& handshake_uuid, const gcomm::UUID& source_uuid, const std::string& node_address, const std::string& group_name, uint8_t segment_id) : version_ (version), type_ (type), flags_ (F_GROUP_NAME | F_NODE_ADDRESS_OR_ERROR | F_HANDSHAKE_UUID), segment_id_ (segment_id), handshake_uuid_ (handshake_uuid), source_uuid_ (source_uuid), node_address_or_error_ (node_address), group_name_ (group_name), node_list_ () { if (type_ != GMCAST_T_HANDSHAKE_RESPONSE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in handshake response constructor"; } /* Ctor for topology change */ Message (int version, const Type type, const gcomm::UUID& source_uuid, const std::string& group_name, const 
NodeList& nodes) : version_ (version), type_ (type), flags_ (F_GROUP_NAME | F_NODE_LIST), segment_id_ (0), handshake_uuid_ (), source_uuid_ (source_uuid), node_address_or_error_ (), group_name_ (group_name), node_list_ (nodes) { if (type_ != GMCAST_T_TOPOLOGY_CHANGE) gu_throw_fatal << "Invalid message type " << type_to_string(type_) << " in topology change constructor"; } ~Message() { } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; gu_trace (off = gu::serialize1(version_, buf, buflen, offset)); gu_trace (off = gu::serialize1(static_cast(type_),buf,buflen,off)); gu_trace (off = gu::serialize1(flags_, buf, buflen, off)); gu_trace (off = gu::serialize1(segment_id_, buf, buflen, off)); gu_trace (off = source_uuid_.serialize(buf, buflen, off)); if (flags_ & F_HANDSHAKE_UUID) { gu_trace(off = handshake_uuid_.serialize(buf, buflen, off)); } if (flags_ & F_NODE_ADDRESS_OR_ERROR) { gu_trace (off = node_address_or_error_.serialize(buf, buflen, off)); } if (flags_ & F_GROUP_NAME) { gu_trace (off = group_name_.serialize(buf, buflen, off)); } if (flags_ & F_NODE_LIST) { gu_trace(off = node_list_.serialize(buf, buflen, off)); } return off; } size_t read_v0(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu::byte_t t; gu_trace (off = gu::unserialize1(buf, buflen, offset, t)); type_ = static_cast(t); switch (type_) { case GMCAST_T_HANDSHAKE: case GMCAST_T_HANDSHAKE_RESPONSE: case GMCAST_T_OK: case GMCAST_T_FAIL: case GMCAST_T_TOPOLOGY_CHANGE: case GMCAST_T_KEEPALIVE: case GMCAST_T_USER_BASE: break; default: gu_throw_error(EINVAL) << "invalid message type " << static_cast(type_); } gu_trace (off = gu::unserialize1(buf, buflen, off, flags_)); gu_trace (off = gu::unserialize1(buf, buflen, off, segment_id_)); gu_trace (off = source_uuid_.unserialize(buf, buflen, off)); if (flags_ & F_HANDSHAKE_UUID) { gu_trace(off = handshake_uuid_.unserialize(buf, buflen, off)); } if (flags_ & F_NODE_ADDRESS_OR_ERROR) { 
gu_trace (off = node_address_or_error_.unserialize(buf, buflen, off)); } if (flags_ & F_GROUP_NAME) { gu_trace (off = group_name_.unserialize(buf, buflen, off)); } if (flags_ & F_NODE_LIST) { gu_trace(off = node_list_.unserialize(buf, buflen, off)); } return off; } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu_trace (off = gu::unserialize1(buf, buflen, offset, version_)); switch (version_) { case 0: gu_trace (return read_v0(buf, buflen, off)); default: gu_throw_error(EPROTONOSUPPORT) << "Unsupported/unrecognized gmcast protocol version: " << version_; } } size_t serial_size() const { return 4 /* Common header: version, type, flags, segment_id */ + source_uuid_.serial_size() + (flags_ & F_HANDSHAKE_UUID ? handshake_uuid_.serial_size() : 0) /* GMCast address if set */ + (flags_ & F_NODE_ADDRESS_OR_ERROR ? node_address_or_error_.serial_size() : 0) /* Group name if set */ + (flags_ & F_GROUP_NAME ? group_name_.serial_size() : 0) /* Node list if set */ + (flags_ & F_NODE_LIST ? 
node_list_.serial_size() : 0); } int version() const { return version_; } Type type() const { return type_; } void set_flags(uint8_t f) { flags_ = f; } uint8_t flags() const { return flags_; } uint8_t segment_id() const { return segment_id_; } const UUID& handshake_uuid() const { return handshake_uuid_; } const UUID& source_uuid() const { return source_uuid_; } const std::string& node_address() const { return node_address_or_error_.to_string(); } const std::string& error() const { return node_address_or_error_.to_string(); } const std::string& group_name() const { return group_name_.to_string(); } const NodeList& node_list() const { return node_list_; } }; #endif // GCOMM_GMCAST_MESSAGE_HPP galera-4-26.4.25/gcomm/src/evs_message2.hpp000644 000164 177776 00000056161 15107057155 021512 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy */ #ifndef EVS_MESSAGE2_HPP #define EVS_MESSAGE2_HPP #include "gcomm/order.hpp" #include "gcomm/view.hpp" #include "gcomm/map.hpp" #include "evs_seqno.hpp" #include "protocol_version.hpp" #include "gu_datetime.hpp" #include "gu_convert.hpp" namespace gcomm { namespace evs { class MessageNode; std::ostream& operator<<(std::ostream&, const MessageNode&); class MessageNodeList; class Message; std::ostream& operator<<(std::ostream&, const Message&); class UserMessage; class AggregateMessage; std::ostream& operator<<(std::ostream&, const AggregateMessage&); class DelegateMessage; class GapMessage; class JoinMessage; class LeaveMessage; class InstallMessage; class DelayedListMessage; class SelectNodesOp; class RangeLuCmp; class RangeHsCmp; } } class gcomm::evs::MessageNode { public: MessageNode(const bool operational = false, const bool suspected = false, const SegmentId segment = 0, const bool evicted = false, const seqno_t leave_seq = -1, const ViewId& view_id = ViewId(V_REG), const seqno_t safe_seq = -1, const Range im_range = Range()) : operational_(operational), suspected_ (suspected ), segment_ (segment ), 
evicted_ (evicted ), leave_seq_ (leave_seq ), view_id_ (view_id ), safe_seq_ (safe_seq ), im_range_ (im_range ) { } MessageNode(const MessageNode& mn) : operational_ (mn.operational_), suspected_ (mn.suspected_ ), segment_ (mn.segment_ ), evicted_ (mn.evicted_ ), leave_seq_ (mn.leave_seq_ ), view_id_ (mn.view_id_ ), safe_seq_ (mn.safe_seq_ ), im_range_ (mn.im_range_ ) { } MessageNode& operator=(const MessageNode& other) = default; bool operational() const { return operational_ ; } bool suspected() const { return suspected_ ; } bool evicted() const { return evicted_ ; } bool leaving() const { return (leave_seq_ != -1) ; } seqno_t leave_seq() const { return leave_seq_ ; } const ViewId& view_id() const { return view_id_ ; } seqno_t safe_seq() const { return safe_seq_ ; } Range im_range() const { return im_range_ ; } SegmentId segment() const { return segment_ ; } bool operator==(const MessageNode& cmp) const { return (operational_ == cmp.operational_ && suspected_ == cmp.suspected_ && leave_seq_ == cmp.leave_seq_ && view_id_ == cmp.view_id_ && safe_seq_ == cmp.safe_seq_ && im_range_ == cmp.im_range_); } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); static size_t serial_size(); private: enum { F_OPERATIONAL = 1 << 0, F_SUSPECTED = 1 << 1, F_EVICTED = 1 << 2 }; bool operational_; // Is operational bool suspected_; SegmentId segment_; bool evicted_; // Evicted out of the cluster seqno_t leave_seq_; ViewId view_id_; // Current view as seen by source of this message seqno_t safe_seq_; // Safe seq as seen... Range im_range_; // Input map range as seen... }; class gcomm::evs::MessageNodeList : public gcomm::Map { }; /*! 
* EVS message class */ class gcomm::evs::Message { public: enum Type { EVS_T_NONE = 0, EVS_T_USER = 1, /*!< User generated message */ EVS_T_DELEGATE = 2, /*!< Delegate message */ EVS_T_GAP = 3, /*!< Gap message */ EVS_T_JOIN = 4, /*!< Join message */ EVS_T_INSTALL = 5, /*!< Install message */ EVS_T_LEAVE = 6, /*!< Leave message */ EVS_T_DELAYED_LIST = 7 /*!< Evict list message */ }; static const size_t num_message_types = EVS_T_DELAYED_LIST + 1; typedef std::map DelayedList; static const uint8_t F_MSG_MORE = 0x1; /*!< Sender has more messages to send */ static const uint8_t F_RETRANS = 0x2; /*!< Message is resent upon request */ /*! * @brief Message source has been set explicitly via set_source() */ static const uint8_t F_SOURCE = 0x4; static const uint8_t F_AGGREGATE= 0x8; /*!< Message contains aggregated payload */ static const uint8_t F_COMMIT = 0x10; static const uint8_t F_BC = 0x20;/*!< Message was sent in backward compatibility mode */ /*! * Get version of the message * * @return Version number */ uint8_t version() const { return version_; } /*! * Get type of the message * * @return Message type */ Type type() const { return type_; } /*! * Check wheter message is of membership type * * @return True if message is of membership type, otherwise false */ bool is_membership() const { return (type_ == EVS_T_JOIN || type_ == EVS_T_INSTALL || type_ == EVS_T_LEAVE || type_ == EVS_T_DELAYED_LIST); } /*! * Get user type of the message. This is applicable only for * messages of type EVS_T_USER. * * @return User type of the message. */ uint8_t user_type() const { return user_type_; } /*! * Get message order type. * * @return Order type of the message. */ Order order() const { return order_; } /*! * Get sequence number associated to the message. * * @return Const reference to sequence number associated to the message. */ seqno_t seq() const { return seq_; } /*! * Get sequence numer range associated to the message. 
* * @return Sequence number range associated to the message. */ seqno_t seq_range() const { return seq_range_; } /*! * Get all-received-upto sequence number associated the the message. * * @return All-received-upto sequence number associated to the message. */ seqno_t aru_seq() const { return aru_seq_; } void set_flags(uint8_t flags) { flags_ = flags; } /*! * Get message flags. * * @return Message flags. */ uint8_t flags() const { return flags_; } /*! * Set message source * * @param uuid Source node uuid */ void set_source(const UUID& uuid) { source_ = uuid; flags_ |= F_SOURCE; } /*! * Get message source UUID. * * @return Message source UUID. */ const UUID& source() const { return source_; } /*! * Get message source view id, view where the message was originated * from. * * @return Message source view id. */ const gcomm::ViewId& source_view_id() const { return source_view_id_; } const gcomm::ViewId& install_view_id() const { return install_view_id_; } /*! * Get range UUID associated to the message. * * @return Range UUID associated to the message. */ const UUID& range_uuid() const { return range_uuid_; } /*! * Get range associated to the message. * * @return Range associated to the message. */ Range range() const { return range_; } /*! * Get fifo sequence number associated to the message. This is * applicable only for messages of membership type. * * @return Fifo sequence number associated to the message. */ int64_t fifo_seq() const { return fifo_seq_; } /*! * Get message node list. * * @return Const reference to message node list. */ const MessageNodeList& node_list() const { return node_list_; } /*! * Get timestamp associated to the message. */ gu::datetime::Date tstamp() const { return tstamp_; } /* Read type from message buffer. */ static Type get_type(const gu::byte_t* buf, size_t buflen, size_t offset); /* Unserialize common header. */ size_t unserialize_common(const gu::byte_t* buf, size_t buflen, size_t offset); /* Unserialize message. 
*/ virtual size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) = 0; bool operator==(const Message& cmp) const; /*! * Copy constructor. */ Message(const Message& msg) : version_ (msg.version_), type_ (msg.type_), user_type_ (msg.user_type_), order_ (msg.order_), seq_ (msg.seq_), seq_range_ (msg.seq_range_), aru_seq_ (msg.aru_seq_), fifo_seq_ (msg.fifo_seq_), flags_ (msg.flags_), source_ (msg.source_), source_view_id_ (msg.source_view_id_), install_view_id_ (msg.install_view_id_), range_uuid_ (msg.range_uuid_), range_ (msg.range_), tstamp_ (msg.tstamp_), node_list_ (msg.node_list_), delayed_list_ (msg.delayed_list_) { } Message& operator=(const Message& msg) { version_ = msg.version_; type_ = msg.type_; user_type_ = msg.user_type_; order_ = msg.order_; seq_ = msg.seq_; seq_range_ = msg.seq_range_; aru_seq_ = msg.aru_seq_; fifo_seq_ = msg.fifo_seq_; flags_ = msg.flags_; source_ = msg.source_; source_view_id_ = msg.source_view_id_; install_view_id_ = msg.install_view_id_; range_uuid_ = msg.range_uuid_; range_ = msg.range_; tstamp_ = msg.tstamp_; node_list_ = msg.node_list_; delayed_list_ = msg.delayed_list_; return *this; } virtual ~Message() { } /*! 
Default constructor */ Message(const uint8_t version = 0, const Type type = EVS_T_NONE, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const ViewId& install_view_id = ViewId(), const uint8_t user_type = 0xff, const Order order = O_DROP, const int64_t fifo_seq = -1, const seqno_t seq = -1, const seqno_t seq_range = -1, const seqno_t aru_seq = -1, const uint8_t flags = 0, const UUID& range_uuid = UUID(), const Range range = Range(), const MessageNodeList& node_list = MessageNodeList()) : version_ (version), type_ (type), user_type_ (user_type), order_ (order), seq_ (seq), seq_range_ (seq_range), aru_seq_ (aru_seq), fifo_seq_ (fifo_seq), flags_ (flags), source_ (source), source_view_id_ (source_view_id), install_view_id_ (install_view_id), range_uuid_ (range_uuid), range_ (range), tstamp_ (gu::datetime::Date::monotonic()), node_list_ (node_list), delayed_list_ () { } protected: size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t serial_size() const; // Version number: // For User, Gap, Leave messages that are exchanged only within a group // the version is minimum commonly supported version among the group, // computed during GATHER phase. // For Join, Install messages version is maximum supported protocol // version by the joiner. uint8_t version_; Type type_; uint8_t user_type_; Order order_; seqno_t seq_; seqno_t seq_range_; seqno_t aru_seq_; int64_t fifo_seq_; uint8_t flags_; UUID source_; ViewId source_view_id_; ViewId install_view_id_; UUID range_uuid_; Range range_; gu::datetime::Date tstamp_; MessageNodeList node_list_; DelayedList delayed_list_; }; /*! * User message class. 
*/ class gcomm::evs::UserMessage : public Message { public: UserMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const seqno_t seq_range = 0, const Order order = O_SAFE, const int64_t fifo_seq = -1, const uint8_t user_type = 0xff, const uint8_t flags = 0) : Message(version, Message::EVS_T_USER, source, source_view_id, ViewId(), user_type, order, fifo_seq, seq, seq_range, aru_seq, flags, UUID(), Range()) { } void set_aru_seq(const seqno_t as) { aru_seq_ = as; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::AggregateMessage { public: AggregateMessage(const int flags = 0, const size_t len = 0, const uint8_t user_type = 0xff) : flags_ (gu::convert(flags, uint8_t(0))), user_type_(user_type), len_ (gu::convert(len, uint16_t(0))) { } int flags() const { return flags_; } size_t len() const { return len_; } uint8_t user_type() const { return user_type_; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); size_t serial_size() const; bool operator==(const AggregateMessage& cmp) const { return (flags_ == cmp.flags_ && len_ == cmp.len_ && user_type_ == cmp.user_type_); } private: uint8_t flags_; uint8_t user_type_; uint16_t len_; }; inline std::ostream& gcomm::evs::operator<<(std::ostream& os, const AggregateMessage& am) { return (os << "{flags=" << am.flags() << ",len=" << am.len() << "}"); } class gcomm::evs::DelegateMessage : public Message { public: DelegateMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const int64_t fifo_seq = -1) : Message(version, EVS_T_DELEGATE, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq) { } size_t serialize(gu::byte_t* 
buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::GapMessage : public Message { public: GapMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const UUID& range_uuid = UUID::nil(), const Range range = Range(), const uint8_t flags = 0) : Message(version, EVS_T_GAP, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, flags, range_uuid, range) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::JoinMessage : public Message { public: JoinMessage(const int max_version = 0, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const MessageNodeList& node_list = MessageNodeList()) : Message(max_version, Message::EVS_T_JOIN, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, 0, UUID(), Range(), node_list) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::InstallMessage : public Message { public: InstallMessage(const int max_version = 0, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const ViewId& install_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const MessageNodeList& node_list = MessageNodeList()) : Message(max_version, Message::EVS_T_INSTALL, source, source_view_id, install_view_id, 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, F_SOURCE, UUID(), Range(), node_list) 
{ } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::LeaveMessage : public Message { public: LeaveMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t seq = -1, const seqno_t aru_seq = -1, const int64_t fifo_seq = -1, const uint8_t flags = 0) : Message(version, EVS_T_LEAVE, source, source_view_id, ViewId(), 0xff, O_UNRELIABLE, fifo_seq, seq, -1, aru_seq, flags) { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; }; class gcomm::evs::DelayedListMessage : public Message { public: DelayedListMessage(const int version = -1, const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const seqno_t fifo_seq = -1) : Message(version, EVS_T_DELAYED_LIST, source, source_view_id, ViewId(), 0xff, O_DROP, fifo_seq) { } void add(const UUID& uuid, uint16_t cnt) { delayed_list_.insert(std::make_pair(uuid, cnt)); } const DelayedList& delayed_list() const { return delayed_list_; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) override; size_t serial_size() const; bool operator==(const DelayedListMessage& cmp) const { return (delayed_list_ == cmp.delayed_list_); } private: }; class gcomm::evs::SelectNodesOp { public: SelectNodesOp(MessageNodeList& nl, const gcomm::ViewId& view_id, const bool operational, const bool leaving) : nl_ (nl), view_id_ (view_id), operational_ (operational), leaving_ (leaving) { } void operator()(const MessageNodeList::value_type& vt) const { const MessageNode& node(MessageNodeList::value(vt)); if ((view_id_ == ViewId() || node.view_id() == view_id_ ) && ((operational_ == true && leaving_ == true ) || 
(node.operational() == operational_ && node.leaving() == leaving_ ) ) ) { nl_.insert_unique(vt); } } private: MessageNodeList& nl_; ViewId const view_id_; bool const operational_; bool const leaving_; }; class gcomm::evs::RangeLuCmp { public: bool operator()(const MessageNodeList::value_type& a, const MessageNodeList::value_type& b) const { gcomm_assert(MessageNodeList::value(a).view_id() == MessageNodeList::value(b).view_id()); return (MessageNodeList::value(a).im_range().lu() < MessageNodeList::value(b).im_range().lu()); } }; class gcomm::evs::RangeHsCmp { public: bool operator()(const MessageNodeList::value_type& a, const MessageNodeList::value_type& b) const { gcomm_assert(MessageNodeList::value(a).view_id() == MessageNodeList::value(b).view_id()); return (MessageNodeList::value(a).im_range().hs() < MessageNodeList::value(b).im_range().hs()); } }; #endif // EVS_MESSAGE2_HPP galera-4-26.4.25/gcomm/src/CMakeLists.txt000644 000164 177776 00000001111 15107057155 021137 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_library(gcomm STATIC asio_protonet.cpp asio_tcp.cpp asio_udp.cpp conf.cpp defaults.cpp datagram.cpp evs_consensus.cpp evs_input_map2.cpp evs_message2.cpp evs_node.cpp evs_proto.cpp gmcast.cpp gmcast_proto.cpp pc.cpp pc_proto.cpp protonet.cpp protostack.cpp transport.cpp uuid.cpp view.cpp socket.cpp ) # TODO: Fix these. 
target_compile_options(gcomm PRIVATE -Wno-unused-parameter -Wno-conversion -Wno-overloaded-virtual ) target_link_libraries(gcomm galerautilsxx) galera-4-26.4.25/gcomm/src/evs_message2.cpp000644 000164 177776 00000050204 15107057155 021475 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy * * $Id$ */ #include "evs_message2.hpp" #include "gu_exception.hpp" #include "gu_logger.hpp" std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::MessageNode& node) { os << " {"; os << "o=" << node.operational() << ","; os << "s=" << node.suspected() << ","; os << "e=" << node.evicted() << ","; os << "ls=" << node.leave_seq() << ","; os << "vid=" << node.view_id() << ","; os << "ss=" << node.safe_seq() << ","; os << "ir=" << node.im_range() << ","; os << "}"; return os; } static const char* const msg_type_str[] = { "NONE", "USER", "DELEGATE", "GAP", "JOIN", "INSTALL", "LEAVE", "DELAYED_LIST" }; static const char* msg_type_to_str(gcomm::evs::Message::Type type) { if (type < gcomm::evs::Message::EVS_T_NONE || type > gcomm::evs::Message::EVS_T_DELAYED_LIST) { return "UNKNOWN"; } return msg_type_str[static_cast(type)]; } std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Message& msg) { os << "{"; os << "v=" << static_cast(msg.version()) << ","; os << "t=" << msg_type_to_str(msg.type()) << ","; os << "ut=" << static_cast(msg.user_type()) << ","; os << "o=" << msg.order() << ","; os << "s=" << msg.seq() << ","; os << "sr=" << msg.seq_range() << ","; os << "as=" << msg.aru_seq() << ","; os << "f=" << static_cast(msg.flags()) << ","; os << "src=" << msg.source() << ","; os << "srcvid=" << msg.source_view_id() << ","; os << "insvid=" << msg.install_view_id() << ","; os << "ru=" << msg.range_uuid() << ","; os << "r=" << msg.range() << ","; os << "fs=" << msg.fifo_seq() << ","; os << "nl=(\n" << msg.node_list() << ")\n"; os << "}"; return os; } size_t gcomm::evs::MessageNode::serialize(gu::byte_t* const buf, size_t const 
buflen, size_t offset) const { uint8_t b = static_cast((operational_ == true ? F_OPERATIONAL : 0) | (suspected_ == true ? F_SUSPECTED : 0) | (evicted_ == true ? F_EVICTED : 0)); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize1(segment_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(leave_seq_, buf, buflen, offset)); gu_trace(offset = view_id_.serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(safe_seq_, buf, buflen, offset)); gu_trace(offset = im_range_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::MessageNode::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); if ((b & ~(F_OPERATIONAL | F_SUSPECTED | F_EVICTED)) != 0) { log_warn << "unknown flags: " << static_cast(b); } operational_ = b & F_OPERATIONAL; suspected_ = b & F_SUSPECTED; evicted_ = b & F_EVICTED; gu_trace(offset = gu::unserialize1(buf, buflen, offset, segment_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, leave_seq_)); gu_trace(offset = view_id_.unserialize(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, safe_seq_)); gu_trace(offset = im_range_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::MessageNode::serial_size() { return 2 + // 4 bytes reserved for flags sizeof(seqno_t) + ViewId::serial_size() + sizeof(seqno_t) + Range::serial_size(); } bool gcomm::evs::Message::operator==(const Message& cmp) const { return (version_ == cmp.version_ && type_ == cmp.type_ && user_type_ == cmp.user_type_ && order_ == cmp.order_ && seq_ == cmp.seq_ && seq_range_ == cmp.seq_range_ && aru_seq_ == cmp.aru_seq_ && fifo_seq_ == cmp.fifo_seq_ && flags_ == cmp.flags_ && source_ == cmp.source_ && source_view_id_ == cmp.source_view_id_ && install_view_id_ == cmp.install_view_id_ && range_uuid_ == cmp.range_uuid_ && range_ == cmp.range_ && node_list_ == cmp.node_list_); } 
// // Header format: // 0 1 2 3 // | 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 | // |-----------------------------------------------------------------| // | zv | t | o | flags | real version | reserved | // |-----------------------------------------------------------------| // | fifo_seq | // | ... | // |-----------------------------------------------------------------| // | source | // | ... | // | ... | // | ... | // |-----------------------------------------------------------------| // |-----------------------------------------------------------------| // | source view id | // | ... | // | ... | // | ... | // | ... | // |-----------------------------------------------------------------| // // // zv - zeroversion // if zeroversion is 0, message version is 0, otherwise it is // read from real version // t - type // o - order // size_t gcomm::evs::Message::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { uint8_t zeroversion; switch (type_) { case EVS_T_JOIN: case EVS_T_INSTALL: zeroversion = 0; break; default: zeroversion = (version_ != 0 ? 
1 : 0); } uint8_t b = static_cast(zeroversion | (type_ << 2) | (order_ << 5)); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize1(flags_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(version_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(uint8_t(0), buf, buflen, offset)); gu_trace(offset = gu::serialize8(fifo_seq_, buf, buflen, offset)); if (flags_ & F_SOURCE) { gu_trace(offset = source_.serialize(buf, buflen, offset)); } gu_trace(offset = source_view_id_.serialize(buf, buflen, offset)); return offset; } gcomm::evs::Message::Type gcomm::evs::Message::get_type(const gu::byte_t* buf, size_t buflen, size_t offset) { uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); return static_cast((b >> 2) & 0x7); } size_t gcomm::evs::Message::unserialize_common(const gu::byte_t* const buf, size_t const buflen, size_t offset) { uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); // The message version will be read from offset 16 regardless what is // the zeroversion value. The only purpose of zeroversion is to // make pre 3.8 nodes to discard messages in new format. type_ = static_cast((b >> 2) & 0x7); if (type_ <= EVS_T_NONE || type_ > EVS_T_DELAYED_LIST) { gu_throw_error(EINVAL) << "invalid type " << type_; } order_ = static_cast((b >> 5) & 0x7); if (order_ < O_DROP || order_ > O_SAFE) { gu_throw_error(EINVAL) << "invalid safety prefix " << order_; } gu_trace(offset = gu::unserialize1(buf, buflen, offset, flags_)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, version_)); switch (type_) { case EVS_T_JOIN: case EVS_T_INSTALL: // Join and install message will always remain protocol zero, // version check is not applicable. 
break; default: if (version_ > GCOMM_PROTOCOL_MAX_VERSION) { gu_throw_error(EPROTONOSUPPORT) << "protocol version " << static_cast(version_) << " not supported"; } break; } uint8_t reserved; gu_trace(offset = gu::unserialize1(buf, buflen, offset, reserved)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, fifo_seq_)); if (flags_ & F_SOURCE) { gu_trace(offset = source_.unserialize(buf, buflen, offset)); } gu_trace(offset = source_view_id_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::Message::serial_size() const { return (1 + // version | type | order 1 + // flags 2 + // pad sizeof(fifo_seq_) + // fifo_seq ((flags_ & F_SOURCE) ? UUID::serial_size() : 0) + ViewId::serial_size()); // source_view_id } size_t gcomm::evs::UserMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(user_type_, buf, buflen, offset)); gcomm_assert(seq_range_ <= seqno_t(0xff)); uint8_t b = static_cast(seq_range_); gu_trace(offset = gu::serialize1(b, buf, buflen, offset)); gu_trace(offset = gu::serialize2(uint16_t(0), buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); return offset; } size_t gcomm::evs::UserMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = Message::unserialize_common(buf, buflen, offset)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, user_type_)); uint8_t b; gu_trace(offset = gu::unserialize1(buf, buflen, offset, b)); seq_range_ = b; uint16_t pad; gu_trace(offset = gu::unserialize2(buf, buflen, offset, pad)); if (pad != 0) { log_warn << "invalid pad: " << pad; } gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); return offset; } size_t gcomm::evs::UserMessage::serial_size() const 
{ return Message::serial_size() + // Header 1 + // User type 1 + // Seq range 2 + // Pad/reserved sizeof(seqno_t) + // Seq sizeof(seqno_t); // Aru seq } size_t gcomm::evs::AggregateMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = gu::serialize1(flags_, buf, buflen, offset)); gu_trace(offset = gu::serialize1(user_type_, buf, buflen, offset)); gu_trace(offset = gu::serialize2(len_, buf, buflen, offset)); return offset; } size_t gcomm::evs::AggregateMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = gu::unserialize1(buf, buflen, offset, flags_)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, user_type_)); gu_trace(offset = gu::unserialize2(buf, buflen, offset, len_)); return offset; } size_t gcomm::evs::AggregateMessage::serial_size() const { return sizeof(flags_) + sizeof(len_) + sizeof(user_type_); } size_t gcomm::evs::DelegateMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::DelegateMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = Message::unserialize_common(buf, buflen, offset)); return offset; } size_t gcomm::evs::DelegateMessage::serial_size() const { return Message::serial_size(); } size_t gcomm::evs::GapMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = range_uuid_.serialize(buf, buflen, offset)); gu_trace(offset = range_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::GapMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = 
Message::unserialize_common(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); gu_trace(offset = range_uuid_.unserialize(buf, buflen, offset)); gu_trace(offset = range_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::GapMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + UUID::serial_size() + Range::serial_size()); } size_t gcomm::evs::JoinMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = node_list_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::JoinMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = Message::unserialize_common(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); node_list_.clear(); gu_trace(offset = node_list_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::JoinMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + node_list_.serial_size()); } size_t gcomm::evs::InstallMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); gu_trace(offset = install_view_id_.serialize(buf, buflen, offset)); gu_trace(offset = node_list_.serialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::InstallMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = 
Message::unserialize_common(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); gu_trace(offset = install_view_id_.unserialize(buf, buflen, offset)); node_list_.clear(); gu_trace(offset = node_list_.unserialize(buf, buflen, offset)); return offset; } size_t gcomm::evs::InstallMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t) + ViewId::serial_size() + node_list_.serial_size()); } size_t gcomm::evs::LeaveMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize8(seq_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(aru_seq_, buf, buflen, offset)); return offset; } size_t gcomm::evs::LeaveMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = Message::unserialize_common(buf, buflen, offset)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, seq_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, aru_seq_)); return offset; } size_t gcomm::evs::LeaveMessage::serial_size() const { return (Message::serial_size() + 2 * sizeof(seqno_t)); } size_t gcomm::evs::DelayedListMessage::serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = Message::serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(static_cast(delayed_list_.size()), buf, buflen, offset)); for (DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { gu_trace(offset = i->first.serialize(buf, buflen, offset)); gu_trace(offset = gu::serialize1(i->second, buf, buflen, offset)); } return offset; } size_t gcomm::evs::DelayedListMessage::unserialize(const gu::byte_t* const buf, size_t const buflen, size_t offset) { gu_trace(offset = Message::unserialize_common(buf, buflen, offset)); delayed_list_.clear(); uint8_t 
list_sz(0); gu_trace(offset = gu::unserialize1(buf, buflen, offset, list_sz)); for (uint8_t i(0); i < list_sz; ++i) { UUID uuid; uint8_t cnt; gu_trace(offset = uuid.unserialize(buf, buflen, offset)); gu_trace(offset = gu::unserialize1(buf, buflen, offset, cnt)); delayed_list_.insert(std::make_pair(uuid, cnt)); } return offset; } size_t gcomm::evs::DelayedListMessage::serial_size() const { return (Message::serial_size() + gu::serial_size(uint8_t(0)) + std::min( delayed_list_.size(), static_cast(std::numeric_limits::max())) * (UUID::serial_size() + gu::serial_size(uint8_t(0)))); } galera-4-26.4.25/gcomm/src/evs_proto.hpp000644 000164 177776 00000051047 15107057155 021145 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy */ /*! * @file evs_proto.hpp * * @brief EVS protocol implementation header. */ #ifndef GCOMM_EVS_PROTO_HPP #define GCOMM_EVS_PROTO_HPP #include "gcomm/protolay.hpp" #include "gcomm/view.hpp" #include "gcomm/transport.hpp" #include "gcomm/map.hpp" #include "gu_histogram.hpp" #include "gu_stats.hpp" #include "evs_seqno.hpp" #include "evs_node.hpp" #include "evs_consensus.hpp" #include "protocol_version.hpp" #include "gu_datetime.hpp" #include #include #include #include namespace gcomm { namespace evs { class Message; class MessageNodeList; class UserMessage; class DelegateMessage; class GapMessage; class JoinMessage; class InstallMessage; class LeaveMessage; class InputMap; class InputMapMsg; class Proto; std::ostream& operator<<(std::ostream&, const Proto&); // // Helper class for getting the location where // certain methods are called from. 
// // Example usage: // Method prototype: // void fun(EVS_CALLER_ARG, int a) // // Calling: // fun(EVS_CALLER, a) // // Logging inside function: // log_debug << EVS_LOG_METHOD << "log message" // class Caller { public: Caller(const char* const file, const int line) : file_(file), line_(line) { } friend std::ostream& operator<<(std::ostream&, const Caller&); private: const char* const file_; const int line_; }; inline std::ostream& operator<<(std::ostream& os, const Caller& caller) { return (os << caller.file_ << ": " << caller.line_ << ": "); } #define EVS_CALLER_ARG const Caller& caller #define EVS_CALLER Caller(__FILE__, __LINE__) #define EVS_LOG_METHOD __FUNCTION__ << " called from " << caller } } /*! * @brief Class implementing EVS protocol */ class gcomm::evs::Proto : public Protolay { public: enum State { S_CLOSED, S_JOINING, S_LEAVING, S_GATHER, S_INSTALL, S_OPERATIONAL, S_MAX }; static std::string to_string(const State s) { switch (s) { case S_CLOSED: return "CLOSED"; case S_JOINING: return "JOINING"; case S_LEAVING: return "LEAVING"; case S_GATHER: return "GATHER"; case S_INSTALL: return "INSTALL"; case S_OPERATIONAL: return "OPERATIONAL"; default: gu_throw_fatal << "Invalid state"; } } friend std::ostream& operator<<(std::ostream&, const Proto&); friend class Consensus; /*! * Default constructor. 
*/ Proto(gu::Config& conf, const UUID& my_uuid, SegmentId segment, const gu::URI& uri = gu::URI("evs://"), const size_t mtu = std::numeric_limits::max(), const View* rst_view = NULL); ~Proto(); const UUID& uuid() const { return my_uuid_; } std::string self_string() const { std::ostringstream os; os << "evs::proto(" << uuid() << ", " << to_string(state()) << ", " << current_view_.id() << ")"; return os.str(); } State state() const { return state_; } size_t known_size() const { return known_.size(); } bool is_output_empty() const { return output_.empty(); } std::string stats() const; void reset_stats(); // Return true if the message with seqno and given send window will // cause flow control. bool is_flow_control(const seqno_t seqno, const seqno_t win) const; // Return true if sending the user message contained in dg // should make all nodes to respond to the message. This happens // if sending the datagram would cause some predefined (@todo name // variable here) number of bytes to be exceeded without sending // and user message without F_MSG_MORE flag. bool request_user_msg_feedback(const gcomm::Datagram& dg) const; int send_user(Datagram&, uint8_t, Order, seqno_t, seqno_t, size_t n_aggregated = 1); size_t mtu() const { return mtu_; } size_t aggregate_len() const; int send_user(const seqno_t); void complete_user(const seqno_t); int send_delegate(Datagram&, const UUID& target); bool gap_rate_limit(const UUID&, const Range&) const; // Send GAP message. // @param range_uuid If non-nil, the gap message will contain request for // retransmission of messages in given range. // @param view_id View ID the gap message belongs to. // @param range If non-empty denotes the range of messages to be resent // by the node with range_uuid // @param commit If set, the gap informs that the node will commit to the // proposed view in previously received install message. 
void send_gap(EVS_CALLER_ARG, const UUID& range_uuid, const ViewId& view_id, const Range range, bool commit = false); const JoinMessage& create_join(); bool join_rate_limit() const; void send_join(bool tval = true); void set_join(const JoinMessage&, const UUID&); void set_leave(const LeaveMessage&, const UUID&); void send_leave(bool handle = true); void send_install(EVS_CALLER_ARG); void send_delayed_list(); void resend(const UUID&, const Range); void recover(const UUID&, const UUID&, const Range); void retrans_leaves(const MessageNodeList&); void set_inactive(const UUID&); bool is_inactive(const UUID&) const; void check_inactive(); // Clean up foreign nodes according to install message. void cleanup_foreign(const InstallMessage&); void cleanup_views(); void cleanup_evicted(); void cleanup_joins(); size_t n_operational() const; void validate_reg_msg(const UserMessage&); void deliver_finish(const InputMapMsg&); void deliver(); void deliver_local(bool trans = false); void deliver_causal(uint8_t user_type, seqno_t seqno, const Datagram&); void validate_trans_msg(const UserMessage&); void deliver_trans(); void deliver_reg_view(const InstallMessage&, const View&); void deliver_trans_view(const InstallMessage&, const View&); void deliver_empty_view(); void setall_committed(bool val); bool is_all_committed() const; void setall_installed(bool val); bool is_all_installed() const; bool is_install_message() const { return install_message_ != 0; } bool is_representative(const UUID& pid) const; void shift_to(const State, const bool send_j = true); bool is_all_suspected(const UUID& uuid) const; const View& current_view() const { return current_view_; } // Message handlers private: /*! * Update input map safe seq * @param uuid Node uuid * @param seq Sequence number * @return Input map seqno before updating */ seqno_t update_im_safe_seq(const size_t uuid, const seqno_t seq); /*! * Update input map safe seqs according to message node list. 
Only * inactive nodes are allowed to be in */ bool update_im_safe_seqs(const MessageNodeList&); bool is_msg_from_previous_view(const Message&); void check_suspects(const UUID&, const MessageNodeList&); void cross_check_inactives(const UUID&, const MessageNodeList&); void check_unseen(); void check_nil_view_id(); void asymmetry_elimination(); void handle_foreign(const Message&); void send_request_retrans_gap(const UUID& target, const UUID& origin, const Range& range); // Request retransmission of messages. // @param target Target node to request messages from. // @param origin Origin of the range of messages to request. // @param range Seqno range to request void request_retrans(const UUID& target, const UUID& origin, const Range& range); // Request missing messages from nodes in the same view. // This method should be used only during configuration changes, // not in operational state. void request_missing(); // Retrans messages which may be missing from some nodes. This method // should used only during configuration changes, not in // operational state. void retrans_missing(); // Handle user message which has view id different from // current view ID. 
// @return True if the message must be processed void handle_user_from_different_view(const Node& node, const UserMessage& msg); void handle_user(const UserMessage&, NodeMap::iterator, const Datagram&); void handle_delegate(const DelegateMessage&, NodeMap::iterator, const Datagram&); void handle_gap(const GapMessage&, NodeMap::iterator); void handle_join(const JoinMessage&, NodeMap::iterator); void handle_leave(const LeaveMessage&, NodeMap::iterator); void handle_install(const InstallMessage&, NodeMap::iterator); void handle_delayed_list(const DelayedListMessage&, NodeMap::iterator); void populate_node_list(MessageNodeList*) const; void isolate(gu::datetime::Period period); public: static std::pair, size_t> unserialize_message(const UUID&, const Datagram&); void handle_msg(const Message& msg, const Datagram& dg = Datagram(), bool direct = true); // Protolay void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram& wb, const ProtoDownMeta& dm); int send_down(Datagram& dg, const ProtoDownMeta& dm); void handle_stable_view(const View& view) { set_stable_view(view); } void handle_fencing(const UUID& uuid) { } void connect(bool first) { gu_trace(shift_to(S_JOINING)); gu_trace(send_join(first)); } void close(bool force = false) { // shifting to S_LEAVING from S_INSTALL is troublesome, // instead of that raise a boolean flag to indicate that // shifting to S_LEAVING should be done once S_OPERATIONAL // is reached // // #760 - pending leave should be done also from S_GATHER, // changing state to S_LEAVING resets timers and may prevent // remaining nodes to reach new group until install timer // times out log_debug << self_string() << " closing in state " << state(); if (state() != S_GATHER && state() != S_INSTALL) { /* Leave message does not consume sequence number. Send a dummy * message to trigger message acknowledgement mechanism. 
This is a * small overhead but speeds up the leave process.*/ if (state() == S_OPERATIONAL) { Datagram wb; gu_trace(send_user(wb, 0xff, O_DROP, -1, -1)); } gu_trace(shift_to(S_LEAVING)); gu_trace(send_leave()); pending_leave_ = false; } else { pending_leave_ = true; } } void close(const UUID& uuid) { set_inactive(uuid); } bool set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb); void handle_get_status(gu::Status& status) const; // gu::datetime::Date functions do appropriate actions for timer handling // and return next expiration time private: public: enum Timer { T_INACTIVITY, T_RETRANS, T_INSTALL, T_STATS }; /*! * Internal timer list */ typedef MultiMap TimerList; private: TimerList timers_; public: // These need currently to be public for unit tests void handle_inactivity_timer(); void handle_retrans_timer(); void handle_install_timer(); void handle_stats_timer(); gu::datetime::Date next_expiration(const Timer) const; void reset_timer(Timer); void cancel_timer(Timer); gu::datetime::Date handle_timers(); /*! * @brief Flags controlling what debug information is logged if * debug logging is turned on. */ enum DebugFlags { D_STATE = 1 << 0, /*!< State changes */ D_TIMERS = 1 << 1, /*!< Timer handling */ D_CONSENSUS = 1 << 2, /*!< Consensus protocol */ D_USER_MSGS = 1 << 3, /*!< User messages */ D_DELEGATE_MSGS = 1 << 4, /*!< Delegate messages */ D_GAP_MSGS = 1 << 5, /*!< Gap messages */ D_JOIN_MSGS = 1 << 6, /*!< Join messages */ D_INSTALL_MSGS = 1 << 7, /*!< Install messages */ D_LEAVE_MSGS = 1 << 8, /*!< Leave messages */ D_FOREIGN_MSGS = 1 << 9, /*!< Foreing messages */ D_RETRANS = 1 << 10, /*!< Retransmitted/recovered messages */ D_DELIVERY = 1 << 11 /*!< Message delivery */ }; /*! * @brief Flags controlling what info log is printed in logs. 
*/ enum InfoFlags { I_VIEWS = 1 << 0, /*!< View changes */ I_STATE = 1 << 1, /*!< State change information */ I_STATISTICS = 1 << 2, /*!< Statistics */ I_PROFILING = 1 << 3 /*!< Profiling information */ }; private: int version_; int debug_mask_; int info_mask_; gu::datetime::Date last_stats_report_; bool collect_stats_; gu::Histogram hs_agreed_; gu::Histogram hs_safe_; gu::Histogram hs_local_causal_; gu::Stats safe_deliv_latency_; long long int send_queue_s_; long long int n_send_queue_s_; std::vector sent_msgs_; long long int retrans_msgs_; long long int recovered_msgs_; std::vector recvd_msgs_; std::vector delivered_msgs_; bool delivering_; UUID my_uuid_; SegmentId segment_; // // Known instances friend class Node; friend class InspectNode; NodeMap known_; NodeMap::iterator self_i_; // gu::datetime::Period view_forget_timeout_; gu::datetime::Period inactive_timeout_; gu::datetime::Period suspect_timeout_; gu::datetime::Period inactive_check_period_; gu::datetime::Period retrans_period_; gu::datetime::Period install_timeout_; gu::datetime::Period join_retrans_period_; gu::datetime::Period stats_report_period_; gu::datetime::Period causal_keepalive_period_; gu::datetime::Period delay_margin_; gu::datetime::Period delayed_keep_period_; gu::datetime::Date last_inactive_check_; gu::datetime::Date last_causal_keepalive_; // Current view id // ViewId current_view; View current_view_; View previous_view_; typedef std::map ViewList; // List of previously seen views from which messages should not be // accepted anymore ViewList previous_views_; // Seen views in gather state, will be copied to previous views // when shifting to operational ViewList gather_views_; // Map containing received messages and aru/safe seqnos InputMap* input_map_; // Helper container for local causal messages class CausalMessage { public: CausalMessage(uint8_t user_type, seqno_t seqno, const Datagram& datagram) : user_type_(user_type), seqno_ (seqno ), datagram_ (datagram ), tstamp_ 
(gu::datetime::Date::monotonic()) { } uint8_t user_type() const { return user_type_; } seqno_t seqno() const { return seqno_ ; } const Datagram& datagram() const { return datagram_ ; } const gu::datetime::Date& tstamp() const { return tstamp_ ; } private: uint8_t user_type_; seqno_t seqno_; Datagram datagram_; gu::datetime::Date tstamp_; }; // Queue containing local causal messages std::deque causal_queue_; // Consensus module Consensus consensus_; // Last sent join tstamp gu::datetime::Date last_sent_join_tstamp_; // Last received install message InstallMessage* install_message_; // Highest seen view id seqno uint32_t max_view_id_seq_; // Install attempt counter uint32_t attempt_seq_; // Boolean to suppress logging when new view has been // detected bool new_view_logged_; // Install timeout counting int max_install_timeouts_; int install_timeout_count_; // Sequence number to maintain membership message FIFO order int64_t fifo_seq_; // Last sent seq seqno_t last_sent_; // Protocol send window size seqno_t send_window_; // User send window size seqno_t user_send_window_; // Bytes since the last user msg which will require feedback from // other nodes (i.e. sent without F_MSG_MORE) size_t bytes_since_request_user_msg_feedback_; // Output message queue. Class implemented as a thin wrapper // around std::deque<> with book keeping of outbound bytes. 
class out_queue { public: typedef std::deque > queue_type; typedef queue_type::const_iterator const_iterator; out_queue() : outbound_bytes_(), queue_() { } bool empty() const { assert(outbound_bytes_ || queue_.empty()); return (outbound_bytes_ == 0); } void push_back(const queue_type::value_type& msg) { outbound_bytes_ += msg.first.len(); queue_.push_back(msg); } void pop_front() { assert(not queue_.empty()); assert(outbound_bytes_ >= queue_.front().first.len()); outbound_bytes_ -= queue_.front().first.len(); queue_.pop_front(); } const queue_type::value_type& front() const { assert(not queue_.empty()); return queue_.front(); } const_iterator begin() const { return queue_.begin(); } const_iterator end() const { return queue_.end(); } size_t size() const { return queue_.size(); } void clear() { outbound_bytes_ = 0; queue_.clear(); } size_t outbound_bytes() const { return outbound_bytes_; } static const size_t max_outbound_bytes = (size_t(1) << 20); private: size_t outbound_bytes_; queue_type queue_; } output_; std::vector send_buf_; uint32_t max_output_size_; size_t mtu_; bool use_aggregate_; bool self_loopback_; State state_; int shift_to_rfcnt_; bool pending_leave_; gu::datetime::Date isolation_end_; class DelayedEntry { public: typedef enum { S_OK, S_DELAYED } State; DelayedEntry(const std::string& addr) : addr_ (addr), tstamp_(gu::datetime::Date::monotonic()), state_(S_DELAYED), state_change_cnt_(1) { } const std::string& addr() const { return addr_; } void set_tstamp(gu::datetime::Date tstamp) { tstamp_ = tstamp; } gu::datetime::Date tstamp() const { return tstamp_; } void set_state(State state, const gu::datetime::Period decay_period, const gu::datetime::Date now) { if (state == S_DELAYED && state_ != state) { // Limit to 0xff, see DelayedList format in DelayedListMessage // restricts this value to uint8_t max. 
if (state_change_cnt_ < 0xff) ++state_change_cnt_; } else if (state == S_OK && tstamp_ + decay_period < now) { if (state_change_cnt_ > 0) --state_change_cnt_; } state_ = state; } State state() const {return state_; } size_t state_change_cnt() const { return state_change_cnt_; } private: const std::string addr_; gu::datetime::Date tstamp_; State state_; size_t state_change_cnt_; }; typedef std::map DelayedList; DelayedList delayed_list_; size_t auto_evict_; // non-copyable Proto(const Proto&); void operator=(const Proto&); }; #endif // EVS_PROTO_HPP galera-4-26.4.25/gcomm/src/defaults.cpp000644 000164 177776 00000006510 15107057155 020722 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2012-2019 Codership Oy */ #include "defaults.hpp" #include "asio_tcp.hpp" #include "gcomm/common.hpp" namespace gcomm { std::string const Defaults::ProtonetBackend = "asio"; std::string const Defaults::ProtonetVersion = "0"; std::string const Defaults::SocketChecksum = "2"; std::string const Defaults::SocketRecvBufSize = GCOMM_ASIO_AUTO_BUF_SIZE; std::string const Defaults::SocketSendBufSize = GCOMM_ASIO_AUTO_BUF_SIZE; std::string const Defaults::GMCastVersion = "0"; std::string const Defaults::GMCastTcpPort = BASE_PORT_DEFAULT; std::string const Defaults::GMCastMCastTTL = "1"; std::string const Defaults::GMCastSegment = "0"; std::string const Defaults::GMCastTimeWait = "PT5S"; std::string const Defaults::GMCastPeerTimeout = "PT3S"; std::string const Defaults::EvsViewForgetTimeout = "PT24H"; std::string const Defaults::EvsViewForgetTimeoutMin = "PT1S"; std::string const Defaults::EvsInactiveCheckPeriod = "PT0.5S"; std::string const Defaults::EvsSuspectTimeout = "PT5S"; std::string const Defaults::EvsSuspectTimeoutMin = "PT0.1S"; std::string const Defaults::EvsInactiveTimeout = "PT15S"; std::string const Defaults::EvsInactiveTimeoutMin = "PT0.1S"; std::string const Defaults::EvsKeepalivePeriod = "PT1S"; std::string const Defaults::EvsCausalKeepalivePeriod= 
Defaults::EvsKeepalivePeriod; std::string const Defaults::EvsKeepalivePeriodMin = "PT0.1S"; std::string const Defaults::EvsJoinRetransPeriod = "PT1S"; std::string const Defaults::EvsJoinRetransPeriodMin = "PT0.1S"; std::string const Defaults::EvsStatsReportPeriod = "PT1M"; std::string const Defaults::EvsStatsReportPeriodMin = "PT1S"; std::string const Defaults::EvsDebugLogMask = "0x1"; std::string const Defaults::EvsInfoLogMask = "0"; std::string const Defaults::EvsSendWindow = "4"; std::string const Defaults::EvsSendWindowMin = "1"; std::string const Defaults::EvsUserSendWindow = "2"; std::string const Defaults::EvsUserSendWindowMin = "1"; std::string const Defaults::EvsMaxInstallTimeouts = "3"; std::string const Defaults::EvsDelayMargin = "PT1S"; std::string const Defaults::EvsDelayedKeepPeriod = "PT30S"; std::string const Defaults::EvsAutoEvict = "0"; std::string const Defaults::EvsVersion = "1"; std::string const Defaults::EvsUseAggregate = "true"; std::string const Defaults::PcAnnounceTimeout = "PT3S"; std::string const Defaults::PcChecksum = "false"; std::string const Defaults::PcIgnoreQuorum = "false"; std::string const Defaults::PcIgnoreSb = PcIgnoreQuorum; std::string const Defaults::PcNpvo = "false"; std::string const Defaults::PcVersion = "0"; std::string const Defaults::PcWaitPrim = "true"; std::string const Defaults::PcWaitPrimTimeout = "PT30S"; std::string const Defaults::PcWeight = "1"; std::string const Defaults::PcRecovery = "true"; std::string const Defaults::PcLinger = "PT20S"; } galera-4-26.4.25/gcomm/src/asio_udp.cpp000644 000164 177776 00000010041 15107057155 020710 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2020 Codership Oy */ #include "asio_udp.hpp" #include "gcomm/util.hpp" #include "gcomm/common.hpp" #include "gu_array.hpp" #include gcomm::AsioUdpSocket::AsioUdpSocket(AsioProtonet& net, const gu::URI& uri) : Socket(uri), net_(net), state_(S_CLOSED), socket_(net_.io_service_.make_datagram_socket(uri)), recv_buf_((1 << 15) 
+ NetHeader::serial_size_) { } gcomm::AsioUdpSocket::~AsioUdpSocket() { socket_->close(); } void gcomm::AsioUdpSocket::connect(const gu::URI& uri) { gcomm_assert(state() == S_CLOSED); Critical crit(net_); socket_->connect(uri); async_receive(); state_ = S_CONNECTED; } void gcomm::AsioUdpSocket::close() { Critical crit(net_); socket_->close(); state_ = S_CLOSED; } int gcomm::AsioUdpSocket::send(int /* segment */, const Datagram& dg) { Critical crit(net_); NetHeader hdr(dg.len(), net_.version_); if (net_.checksum_ != NetHeader::CS_NONE) { hdr.set_crc32(crc32(net_.checksum_, dg), net_.checksum_); } // Make copy of datagram to be able to adjust the header Datagram priv_dg(dg); priv_dg.set_header_offset(priv_dg.header_offset() - NetHeader::serial_size_); serialize(hdr, priv_dg.header(), priv_dg.header_size(), priv_dg.header_offset()); std::array cbs; cbs[0] = gu::AsioConstBuffer(dg.header() + dg.header_offset(), dg.header_len()); cbs[1] = gu::AsioConstBuffer(dg.payload().data(), dg.payload().size()); try { socket_->write(cbs); } catch (const gu::Exception& e) { log_warn << "Error: " << e.what(); return e.get_errno(); } return 0; } void gcomm::AsioUdpSocket::read_handler(gu::AsioDatagramSocket&, const gu::AsioErrorCode& ec, size_t bytes_transferred) { if (ec) { // return; } if (bytes_transferred >= NetHeader::serial_size_) { Critical crit(net_); NetHeader hdr; try { unserialize(&recv_buf_[0], NetHeader::serial_size_, 0, hdr); } catch (gu::Exception& e) { log_warn << "hdr unserialize failed: " << e.get_errno(); return; } if (NetHeader::serial_size_ + hdr.len() != bytes_transferred) { log_warn << "len " << hdr.len() << " does not match to bytes transferred" << bytes_transferred; } else { Datagram dg( gu::SharedBuffer( new gu::Buffer(&recv_buf_[0] + NetHeader::serial_size_, &recv_buf_[0] + NetHeader::serial_size_ + hdr.len()))); if (net_.checksum_ == true && check_cs(hdr, dg)) { log_warn << "checksum failed, hdr: len=" << hdr.len() << " has_crc32=" << hdr.has_crc32() << " 
has_crc32c=" << hdr.has_crc32c() << " crc32=" << hdr.crc32(); } else { net_.dispatch(id(), dg, ProtoUpMeta()); } } } else { log_warn << "short read of " << bytes_transferred; } async_receive(); } void gcomm::AsioUdpSocket::async_receive() { Critical crit(net_); socket_->async_read(gu::AsioMutableBuffer(&recv_buf_[0], recv_buf_.size()), shared_from_this()); } size_t gcomm::AsioUdpSocket::mtu() const { return (1 << 15); } std::string gcomm::AsioUdpSocket::local_addr() const { return socket_->local_addr(); } std::string gcomm::AsioUdpSocket::remote_addr() const { // Not defined return ""; } galera-4-26.4.25/gcomm/src/evs_input_map2.cpp000644 000164 177776 00000027457 15107057155 022063 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #include "evs_input_map2.hpp" #include "gcomm/util.hpp" #include "gu_exception.hpp" #include "gu_logger.hpp" #include "gu_buffer.hpp" #include #include ////////////////////////////////////////////////////////////////////////// // // Static operators and functions // ////////////////////////////////////////////////////////////////////////// // Compare node index LUs class NodeIndexLUCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return (a.range().lu() < b.range().lu()); } }; class NodeIndexHSCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return (a.range().hs() < b.range().hs()); } }; // Compare node index safe seqs class NodeIndexSafeSeqCmpOp { public: bool operator()(const gcomm::evs::InputMapNodeIndex::value_type& a, const gcomm::evs::InputMapNodeIndex::value_type& b) const { return a.safe_seq() < b.safe_seq(); } }; ////////////////////////////////////////////////////////////////////////// // // Ostream operators // ////////////////////////////////////////////////////////////////////////// std::ostream& 
gcomm::evs::operator<<(std::ostream& os, const InputMapNode& in) { return (os << "node: {" << "idx=" << in.index() << "," << "range=" << in.range() << "," << "safe_seq=" << in.safe_seq() << "}"); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMapNodeIndex& ni) { copy(ni.begin(), ni.end(), std::ostream_iterator(os, " ")); return os; } std::ostream& gcomm::operator<<(std::ostream& os, const InputMapMsgKey& mk) { return (os << "(" << mk.index() << "," << mk.seq() << ")"); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMapMsg& m) { return (os << m.msg()); } std::ostream& gcomm::evs::operator<<(std::ostream& os, const InputMap& im) { return (os << "evs::input_map: {" << "aru_seq=" << im.aru_seq() << "," << "safe_seq=" << im.safe_seq() << "," << "node_index=" << *im.node_index_ #ifndef NDEBUG << "," << "msg_index=" << *im.msg_index_ << "," << "recovery_index=" << *im.recovery_index_ #endif // !NDEBUG << "}"); } ////////////////////////////////////////////////////////////////////////// // // Constructors/destructors // ////////////////////////////////////////////////////////////////////////// gcomm::evs::InputMap::InputMap() : safe_seq_ (-1), aru_seq_ (-1), node_index_ (new InputMapNodeIndex()), msg_index_ (new InputMapMsgIndex()), recovery_index_ (new InputMapMsgIndex()) { } gcomm::evs::InputMap::~InputMap() { clear(); delete node_index_; delete msg_index_; delete recovery_index_; } ////////////////////////////////////////////////////////////////////////// // // Public member functions // ////////////////////////////////////////////////////////////////////////// void gcomm::evs::InputMap::reset(const size_t nodes) { gcomm_assert(msg_index_->empty() == true && recovery_index_->empty() == true); node_index_->clear(); log_debug << " size " << node_index_->size(); gu_trace(node_index_->resize(nodes, InputMapNode())); for (size_t i = 0; i < nodes; ++i) { node_index_->at(i).set_index(i); } log_debug << *node_index_ << " size " << 
node_index_->size(); } gcomm::evs::seqno_t gcomm::evs::InputMap::min_hs() const { seqno_t ret; gcomm_assert(node_index_->empty() == false); ret = min_element(node_index_->begin(), node_index_->end(), NodeIndexHSCmpOp())->range().hs(); return ret; } gcomm::evs::seqno_t gcomm::evs::InputMap::max_hs() const { seqno_t ret; gcomm_assert(node_index_->empty() == false); ret = max_element(node_index_->begin(), node_index_->end(), NodeIndexHSCmpOp())->range().hs(); return ret; } void gcomm::evs::InputMap::set_safe_seq(const size_t uuid, const seqno_t seq) { gcomm_assert(seq != -1); // @note This assertion does not necessarily hold. Some other // instance may well have higher all received up to seqno // than this (due to packet loss). Commented out... and left // for future reference. // gcomm_assert(aru_seq != seqno_t::max() && seq <= aru_seq); // Update node safe seq. Must (at least should) be updated // in monotonically increasing order if node works ok. InputMapNode& node(node_index_->at(uuid)); gcomm_assert(seq >= node.safe_seq()) << "node.safe_seq=" << node.safe_seq() << " seq=" << seq; node.set_safe_seq(seq); // Update global safe seq which must be monotonically increasing. 
InputMapNodeIndex::const_iterator min = min_element(node_index_->begin(), node_index_->end(), NodeIndexSafeSeqCmpOp()); const seqno_t minval = min->safe_seq(); gcomm_assert(minval >= safe_seq_); safe_seq_ = minval; // Global safe seq must always be smaller than equal to aru seq gcomm_assert(safe_seq_ <= aru_seq_); // Cleanup recovery index cleanup_recovery_index(); } void gcomm::evs::InputMap::clear() { if (msg_index_->empty() == false) { log_warn << "discarding " << msg_index_->size() << " messages from message index"; } msg_index_->clear(); if (recovery_index_->empty() == false) { log_debug << "discarding " << recovery_index_->size() << " messages from recovery index"; } recovery_index_->clear(); node_index_->clear(); aru_seq_ = -1; safe_seq_ = -1; } gcomm::evs::Range gcomm::evs::InputMap::insert(const size_t uuid, const UserMessage& msg, const Datagram& rb) { Range range; // Only insert messages with meaningful seqno gcomm_assert(msg.seq() > -1); // User should check aru_seq before inserting. This check is left // also in optimized builds since violating it may cause duplicate // messages. gcomm_assert(aru_seq_ < msg.seq()) << "aru seq " << aru_seq_ << " msg seq " << msg.seq() << " index size " << msg_index_->size(); gcomm_assert(uuid < node_index_->size()); InputMapNode& node((*node_index_)[uuid]); range = node.range(); // User should check LU before inserting. 
This check is left // also in optimized builds since violating it may cause duplicate // messages gcomm_assert(range.lu() <= msg.seq()) << "lu " << range.lu() << " > " << msg.seq(); // Check whether this message has already been seen if (msg.seq() < node.range().lu() || (msg.seq() <= node.range().hs() && recovery_index_->find(InputMapMsgKey(node.index(), msg.seq())) != recovery_index_->end())) { return node.range(); } // Loop over message seqno range and insert messages when not // already found for (seqno_t s = msg.seq(); s <= msg.seq() + msg.seq_range(); ++s) { InputMapMsgIndex::iterator msg_i; if (range.hs() < s) { msg_i = msg_index_->end(); } else { msg_i = msg_index_->find(InputMapMsgKey(node.index(), s)); } if (msg_i == msg_index_->end()) { Datagram ins_dg(s == msg.seq() ? Datagram(rb) : Datagram()); gu_trace((void)msg_index_->insert_unique( std::make_pair( InputMapMsgKey(node.index(), s), InputMapMsg( (s == msg.seq() ? msg : UserMessage(msg.version(), msg.source(), msg.source_view_id(), s, msg.aru_seq(), 0, O_DROP)), ins_dg)))); } // Update highest seen if (range.hs() < s) { range.set_hs(s); } // Update lowest unseen if (range.lu() == s) { seqno_t i(s); do { ++i; } while ( i <= range.hs() && (msg_index_->find(InputMapMsgKey(node.index(), i)) != msg_index_->end() || recovery_index_->find(InputMapMsgKey(node.index(), i)) != recovery_index_->end())); range.set_lu(i); } } node.set_range(range); update_aru(); return range; } void gcomm::evs::InputMap::erase(iterator i) { gu_trace(recovery_index_->insert_unique(*i)); gu_trace(msg_index_->erase(i)); } gcomm::evs::InputMap::iterator gcomm::evs::InputMap::find(const size_t uuid, const seqno_t seq) const { iterator ret; const InputMapNode& node(node_index_->at(uuid)); const InputMapMsgKey key(node.index(), seq); gu_trace(ret = msg_index_->find(key)); return ret; } gcomm::evs::InputMap::iterator gcomm::evs::InputMap::recover(const size_t uuid, const seqno_t seq) const { iterator ret; const InputMapNode& 
node(node_index_->at(uuid)); const InputMapMsgKey key(node.index(), seq); gu_trace(ret = recovery_index_->find_checked(key)); return ret; } static void append_gap_range_list(std::vector& range_list, gcomm::evs::seqno_t lowest_unseen, gcomm::evs::seqno_t seq) { if (range_list.empty()) { range_list.push_back(gcomm::evs::Range(lowest_unseen, seq)); } else if (range_list.rbegin()->hs() + 1 == seq) { range_list.rbegin()->set_hs(seq); } else { range_list.push_back(gcomm::evs::Range(seq, seq)); } } std::vector gcomm::evs::InputMap::gap_range_list(size_t index, const Range& range) const { const InputMapNode& node(node_index_->at(index)); seqno_t max_lu(std::max(range.lu(), node.range().lu())); std::vector ret; for (seqno_t seq(range.lu()); seq <= range.hs(); ++seq) { const InputMapMsgKey key(index, seq); gcomm::evs::InputMap::const_iterator msg_i(msg_index_->find(key)); if (msg_i != msg_index_->end()) { continue; } msg_i = recovery_index_->find(key); if (msg_i != recovery_index_->end()) { continue; } append_gap_range_list(ret, max_lu, seq); } return ret; } ////////////////////////////////////////////////////////////////////////// // // Private member functions // ////////////////////////////////////////////////////////////////////////// inline void gcomm::evs::InputMap::update_aru() { InputMapNodeIndex::const_iterator min = min_element(node_index_->begin(), node_index_->end(), NodeIndexLUCmpOp()); const seqno_t minval = min->range().lu(); /* aru_seq must not decrease */ gcomm_assert(minval - 1 >= aru_seq_); aru_seq_ = minval - 1; } void gcomm::evs::InputMap::cleanup_recovery_index() { gcomm_assert(node_index_->size() > 0); InputMapMsgIndex::iterator i = recovery_index_->lower_bound( InputMapMsgKey(0, safe_seq_ + 1)); recovery_index_->erase(recovery_index_->begin(), i); } galera-4-26.4.25/gcomm/src/socket.hpp000644 000164 177776 00000011375 15107057155 020415 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2009-2019 Codership Oy // //! 
// @file socket.hpp Socket interface. // // This file defines socket interface used by gcomm. Currently socket interface // provides synchronous send() but only async_recv(). // #ifndef GCOMM_SOCKET_HPP #define GCOMM_SOCKET_HPP #include "gcomm/datagram.hpp" #include "gu_uri.hpp" namespace gcomm { typedef const void* SocketId; //!< Socket Identifier class Socket; //!< Socket interface typedef std::shared_ptr SocketPtr; class Acceptor; //!< Acceptor interfacemat /** * Statistics for socket connection. Currently relevant only * to TCP stream sockets and available on Linux/FreeBSD only. */ typedef struct socket_stats_st { /* Stats from kernel - tcp_info for TCP sockets. */ long rtt; /** RTT in usecs. */ long rttvar; /** RTT variance in usecs. */ long rto; /** Retransmission timeout in usecs. */ long lost; /** Estimate of lost packets (Linux only). */ long last_data_recv; /** Time since last received data in msecs. */ long cwnd; /** Congestion window */ /* Stats from userspace */ long last_queued_since; /** Last queued since in msecs */ long last_delivered_since; /** Last delivered since in msecs */ long send_queue_length; /** Number of messaged pending for send. */ long send_queue_bytes; /** Number of bytes in send queue. 
*/ std::vector > send_queue_segments; socket_stats_st() : rtt(), rttvar(), rto(), lost(), last_data_recv(), cwnd(), last_queued_since(), last_delivered_since(), send_queue_length(), send_queue_bytes(), send_queue_segments() { } } SocketStats; static inline std::ostream& operator<<(std::ostream& os, const SocketStats& stats) { os << "rtt: " << stats.rtt << " rttvar: " << stats.rttvar << " rto: " << stats.rto << " lost: " << stats.lost << " last_data_recv: " << stats.last_data_recv << " cwnd: " << stats.cwnd << " last_queued_since: " << stats.last_queued_since << " last_delivered_since: " << stats.last_delivered_since << " send_queue_length: " << stats.send_queue_length << " send_queue_bytes: " << stats.send_queue_bytes; for (std::vector >::const_iterator i(stats.send_queue_segments.begin()); i != stats.send_queue_segments.end(); ++i) { os << " segment: " << i->first << " messages: " << i->second; } return os; } } class gcomm::Socket { public: typedef enum { S_CLOSED, S_CONNECTING, S_CONNECTED, S_FAILED, S_CLOSING } State; /** * Symbolic option names (to specify in URI) */ static const std::string OptNonBlocking; /*! socket.non_blocking */ static const std::string OptIfAddr; /*! socket.if_addr */ static const std::string OptIfLoop; /*! socket.if_loop */ static const std::string OptCRC32; /*! socket.crc32 */ static const std::string OptMcastTTL; /*! socket.mcast_ttl */ Socket(const gu::URI& uri) : uri_(uri) { } virtual ~Socket() { } virtual void connect(const gu::URI& uri) = 0; virtual void close() = 0; virtual void set_option(const std::string& key, const std::string& val) = 0; // Send a datagram originating from segment. The segment parameter // can be used by the implementation to implement fair queuing for // messages originating from different segments. 
virtual int send(int segment, const Datagram& dg) = 0; virtual void async_receive() = 0; virtual size_t mtu() const = 0; virtual std::string local_addr() const = 0; virtual std::string remote_addr() const = 0; virtual State state() const = 0; virtual SocketId id() const = 0; virtual SocketStats stats() const = 0; protected: const gu::URI uri_; }; class gcomm::Acceptor { public: typedef enum { S_CLOSED, S_LISTENING, S_FAILED } State; Acceptor(const gu::URI& uri) : uri_(uri) { } virtual ~Acceptor() { } virtual void listen(const gu::URI& uri) = 0; virtual std::string listen_addr() const = 0; virtual void close() = 0; virtual State state() const = 0; virtual SocketPtr accept() = 0; virtual SocketId id() const = 0; protected: const gu::URI uri_; }; #endif // GCOMM_SOCKET_HPP galera-4-26.4.25/gcomm/src/asio_protonet.hpp000644 000164 177776 00000003601 15107057155 022003 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010 Codership Oy */ #ifndef GCOMM_ASIO_PROTONET_HPP #define GCOMM_ASIO_PROTONET_HPP #include "gcomm/protonet.hpp" #include "socket.hpp" #include "gu_monitor.hpp" #include "gu_asio.hpp" #include #include #include #include "gu_disable_non_virtual_dtor.hpp" namespace gcomm { class AsioProtonet; } class gcomm::AsioProtonet : public gcomm::Protonet { public: AsioProtonet(gu::Config& conf, int version = 0); ~AsioProtonet(); size_t event_loop(const gu::datetime::Period& p); void dispatch(const SocketId&, const Datagram&, const ProtoUpMeta&); void interrupt(); SocketPtr socket(const gu::URI&); std::shared_ptr acceptor(const gu::URI&); void enter(); void leave(); size_t mtu() const { return mtu_; } std::string get_ssl_password() const; private: bool timer_expired_; class TimerHandler : public gu::AsioSteadyTimerHandler , public std::enable_shared_from_this { public: TimerHandler(AsioProtonet& pnet) : pnet_(pnet) { } void handle_wait(const gu::AsioErrorCode& ec) { return pnet_.handle_wait(ec); } private: AsioProtonet& pnet_; }; friend class AsioTcpSocket; 
friend class AsioTcpAcceptor; friend class AsioUdpSocket; AsioProtonet(const AsioProtonet&); void handle_wait(const gu::AsioErrorCode& ec); gu::RecursiveMutex mutex_; gu::datetime::Date poll_until_; gu::AsioIoService io_service_; std::shared_ptr timer_handler_; gu::AsioSteadyTimer timer_; size_t mtu_; NetHeader::checksum_t checksum_; }; #include "gu_enable_non_virtual_dtor.hpp" #endif // GCOMM_ASIO_PROTONET_HPP galera-4-26.4.25/gcomm/src/conf.cpp000644 000164 177776 00000017632 15107057155 020047 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ #include "gcomm/conf.hpp" #include "defaults.hpp" #include "common.h" #include static std::string const Delim = "."; std::string const BaseHost(COMMON_BASE_HOST_KEY); std::string const BasePort(COMMON_BASE_PORT_KEY); // Protonet std::string const gcomm::Conf::ProtonetBackend("protonet.backend"); std::string const gcomm::Conf::ProtonetVersion("protonet.version"); // TCP static std::string const SocketPrefix("socket" + Delim); std::string const gcomm::Conf::TcpNonBlocking = SocketPrefix + "non_blocking"; std::string const gcomm::Conf::SocketChecksum = SocketPrefix + "checksum"; std::string const gcomm::Conf::SocketRecvBufSize = SocketPrefix + "recv_buf_size"; std::string const gcomm::Conf::SocketSendBufSize = SocketPrefix + "send_buf_size"; // GMCast std::string const gcomm::Conf::GMCastScheme = "gmcast"; static std::string const GMCastPrefix(gcomm::Conf::GMCastScheme + Delim); std::string const gcomm::Conf::GMCastVersion = GMCastPrefix + "version"; std::string const gcomm::Conf::GMCastGroup = GMCastPrefix + "group"; std::string const gcomm::Conf::GMCastListenAddr = GMCastPrefix + "listen_addr"; std::string const gcomm::Conf::GMCastMCastAddr = GMCastPrefix + "mcast_addr"; std::string const gcomm::Conf::GMCastMCastPort = GMCastPrefix + "mcast_port"; std::string const gcomm::Conf::GMCastMCastTTL = GMCastPrefix + "mcast_ttl"; std::string const gcomm::Conf::GMCastTimeWait = GMCastPrefix + 
"time_wait"; std::string const gcomm::Conf::GMCastPeerTimeout = GMCastPrefix + "peer_timeout"; std::string const gcomm::Conf::GMCastMaxInitialReconnectAttempts = GMCastPrefix + "mira"; std::string const gcomm::Conf::GMCastPeerAddr = GMCastPrefix + "peer_addr"; std::string const gcomm::Conf::GMCastIsolate = GMCastPrefix + "isolate"; std::string const gcomm::Conf::GMCastSegment = GMCastPrefix + "segment"; // EVS std::string const gcomm::Conf::EvsScheme = "evs"; static std::string const EvsPrefix(gcomm::Conf::EvsScheme + Delim); std::string const gcomm::Conf::EvsVersion = EvsPrefix + "version"; std::string const gcomm::Conf::EvsViewForgetTimeout = EvsPrefix + "view_forget_timeout"; std::string const gcomm::Conf::EvsInactiveTimeout = EvsPrefix + "inactive_timeout"; std::string const gcomm::Conf::EvsSuspectTimeout = EvsPrefix + "suspect_timeout"; std::string const gcomm::Conf::EvsInactiveCheckPeriod = EvsPrefix + "inactive_check_period"; std::string const gcomm::Conf::EvsInstallTimeout = EvsPrefix + "install_timeout"; std::string const gcomm::Conf::EvsKeepalivePeriod = EvsPrefix + "keepalive_period"; std::string const gcomm::Conf::EvsJoinRetransPeriod = EvsPrefix + "join_retrans_period"; std::string const gcomm::Conf::EvsStatsReportPeriod = EvsPrefix + "stats_report_period"; std::string const gcomm::Conf::EvsDebugLogMask = EvsPrefix + "debug_log_mask"; std::string const gcomm::Conf::EvsInfoLogMask = EvsPrefix + "info_log_mask"; std::string const gcomm::Conf::EvsSendWindow = EvsPrefix + "send_window"; std::string const gcomm::Conf::EvsUserSendWindow = EvsPrefix + "user_send_window"; std::string const gcomm::Conf::EvsUseAggregate = EvsPrefix + "use_aggregate"; std::string const gcomm::Conf::EvsCausalKeepalivePeriod = EvsPrefix + "causal_keepalive_period"; std::string const gcomm::Conf::EvsMaxInstallTimeouts = EvsPrefix + "max_install_timeouts"; std::string const gcomm::Conf::EvsDelayMargin = EvsPrefix + "delay_margin"; std::string const gcomm::Conf::EvsDelayedKeepPeriod = 
EvsPrefix + "delayed_keep_period"; std::string const gcomm::Conf::EvsEvict = EvsPrefix + "evict"; std::string const gcomm::Conf::EvsAutoEvict = EvsPrefix + "auto_evict"; // PC std::string const gcomm::Conf::PcScheme = "pc"; static std::string const PcPrefix(gcomm::Conf::PcScheme + Delim); std::string const gcomm::Conf::PcVersion = PcPrefix + "version"; std::string const gcomm::Conf::PcIgnoreSb = PcPrefix + "ignore_sb"; std::string const gcomm::Conf::PcIgnoreQuorum = PcPrefix + "ignore_quorum"; std::string const gcomm::Conf::PcChecksum = PcPrefix + "checksum"; std::string const gcomm::Conf::PcLinger = PcPrefix + "linger"; std::string const gcomm::Conf::PcAnnounceTimeout = PcPrefix + "announce_timeout"; std::string const gcomm::Conf::PcNpvo = PcPrefix + "npvo"; std::string const gcomm::Conf::PcBootstrap = PcPrefix + "bootstrap"; std::string const gcomm::Conf::PcWaitPrim = PcPrefix + "wait_prim"; std::string const gcomm::Conf::PcWaitPrimTimeout = PcPrefix + "wait_prim_timeout"; std::string const gcomm::Conf::PcWeight = PcPrefix + "weight"; std::string const gcomm::Conf::PcRecovery = PcPrefix + "recovery"; void gcomm::Conf::register_params(gu::Config& cnf) { #define GCOMM_CONF_ADD(_x_) cnf.add(_x_, Flags::_x_); #define GCOMM_CONF_ADD_DEFAULT(_x_) cnf.add(_x_, Defaults::_x_, Flags::_x_); GCOMM_CONF_ADD (BaseHost); GCOMM_CONF_ADD (BasePort); GCOMM_CONF_ADD_DEFAULT(ProtonetBackend); GCOMM_CONF_ADD_DEFAULT(ProtonetVersion); GCOMM_CONF_ADD (TcpNonBlocking); GCOMM_CONF_ADD_DEFAULT(SocketChecksum); GCOMM_CONF_ADD_DEFAULT(SocketRecvBufSize); GCOMM_CONF_ADD_DEFAULT(SocketSendBufSize); GCOMM_CONF_ADD_DEFAULT(GMCastVersion); GCOMM_CONF_ADD (GMCastGroup); GCOMM_CONF_ADD (GMCastListenAddr); GCOMM_CONF_ADD (GMCastMCastAddr); GCOMM_CONF_ADD (GMCastMCastPort); GCOMM_CONF_ADD_DEFAULT(GMCastMCastTTL); GCOMM_CONF_ADD (GMCastMCastAddr); GCOMM_CONF_ADD_DEFAULT(GMCastTimeWait); GCOMM_CONF_ADD_DEFAULT(GMCastPeerTimeout); GCOMM_CONF_ADD (GMCastMaxInitialReconnectAttempts); GCOMM_CONF_ADD 
(GMCastPeerAddr); GCOMM_CONF_ADD (GMCastIsolate); GCOMM_CONF_ADD_DEFAULT(GMCastSegment); GCOMM_CONF_ADD_DEFAULT(EvsVersion); GCOMM_CONF_ADD_DEFAULT(EvsViewForgetTimeout); GCOMM_CONF_ADD_DEFAULT(EvsSuspectTimeout); GCOMM_CONF_ADD_DEFAULT(EvsInactiveTimeout); GCOMM_CONF_ADD_DEFAULT(EvsInactiveCheckPeriod); GCOMM_CONF_ADD (EvsInstallTimeout); GCOMM_CONF_ADD_DEFAULT(EvsKeepalivePeriod); GCOMM_CONF_ADD_DEFAULT(EvsJoinRetransPeriod); GCOMM_CONF_ADD_DEFAULT(EvsStatsReportPeriod); GCOMM_CONF_ADD_DEFAULT(EvsDebugLogMask); GCOMM_CONF_ADD_DEFAULT(EvsInfoLogMask); GCOMM_CONF_ADD_DEFAULT(EvsSendWindow); GCOMM_CONF_ADD_DEFAULT(EvsUserSendWindow); GCOMM_CONF_ADD_DEFAULT(EvsUseAggregate); GCOMM_CONF_ADD_DEFAULT(EvsCausalKeepalivePeriod); GCOMM_CONF_ADD_DEFAULT(EvsMaxInstallTimeouts); GCOMM_CONF_ADD_DEFAULT(EvsDelayMargin); GCOMM_CONF_ADD_DEFAULT(EvsDelayedKeepPeriod); GCOMM_CONF_ADD (EvsEvict); GCOMM_CONF_ADD_DEFAULT(EvsAutoEvict); GCOMM_CONF_ADD_DEFAULT(PcVersion); GCOMM_CONF_ADD_DEFAULT(PcIgnoreSb); GCOMM_CONF_ADD_DEFAULT(PcIgnoreQuorum); GCOMM_CONF_ADD_DEFAULT(PcChecksum); GCOMM_CONF_ADD_DEFAULT(PcAnnounceTimeout); GCOMM_CONF_ADD_DEFAULT(PcLinger); GCOMM_CONF_ADD_DEFAULT(PcNpvo); GCOMM_CONF_ADD (PcBootstrap); GCOMM_CONF_ADD_DEFAULT(PcWaitPrim); GCOMM_CONF_ADD_DEFAULT(PcWaitPrimTimeout); GCOMM_CONF_ADD_DEFAULT(PcWeight); GCOMM_CONF_ADD_DEFAULT(PcRecovery); #undef GCOMM_CONF_ADD #undef GCOMM_CONF_ADD_DEFAULT } void gcomm::Conf::check_params(const gu::Config& conf) { check_recv_buf_size(conf.get(SocketRecvBufSize)); } size_t gcomm::Conf::check_recv_buf_size(const std::string& str) { // signed type to check for negative values return (str == Defaults::SocketRecvBufSize || check_range(SocketRecvBufSize, str, 0, std::numeric_limits::max())); } size_t gcomm::Conf::check_send_buf_size(const std::string& str) { // signed type to check for negative values return (str == Defaults::SocketSendBufSize || check_range(SocketSendBufSize, str, 0, std::numeric_limits::max())); } 
galera-4-26.4.25/gcomm/src/socket.cpp000644 000164 177776 00000001035 15107057155 020400 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2012 Codership Oy // #include "socket.hpp" static const std::string SocketOptPrefix = "socket."; const std::string gcomm::Socket::OptNonBlocking = SocketOptPrefix + "non_blocking"; const std::string gcomm::Socket::OptIfAddr = SocketOptPrefix + "if_addr"; const std::string gcomm::Socket::OptIfLoop = SocketOptPrefix + "if_loop"; const std::string gcomm::Socket::OptCRC32 = SocketOptPrefix + "crc32"; const std::string gcomm::Socket::OptMcastTTL = SocketOptPrefix + "mcast_ttl"; galera-4-26.4.25/gcomm/src/protonet.cpp000644 000164 177776 00000004035 15107057155 020765 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ #ifdef HAVE_ASIO_HPP #include "asio_protonet.hpp" #endif // HAVE_ASIO_HPP #include "gcomm/util.hpp" #include "gcomm/conf.hpp" void gcomm::Protonet::insert(Protostack* pstack) { log_debug << "insert pstack " << pstack; if (find(protos_.begin(), protos_.end(), pstack) != protos_.end()) { gu_throw_fatal; } protos_.push_back(pstack); } void gcomm::Protonet::erase(Protostack* pstack) { log_debug << "erase pstack " << pstack; std::deque::iterator i; if ((i = find(protos_.begin(), protos_.end(), pstack)) == protos_.end()) { gu_throw_fatal; } protos_.erase(i); } gu::datetime::Date gcomm::Protonet::handle_timers() { Critical crit(*this); gu::datetime::Date next_time(gu::datetime::Date::max()); { for (std::deque::iterator i = protos_.begin(); i != protos_.end(); ++i) { next_time = std::min(next_time, (*i)->handle_timers()); } } return next_time; } bool gcomm::Protonet::set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb) { bool ret(false); for (std::deque::iterator i(protos_.begin()); i != protos_.end(); ++i) { ret |= (*i)->set_param(key, val, sync_param_cb); } return ret; } gcomm::Protonet* gcomm::Protonet::create(gu::Config& conf) { const 
std::string backend(conf.get(Conf::ProtonetBackend)); const int version(conf.get(Conf::ProtonetVersion)); if (version > max_version_) { gu_throw_error(EINVAL) << "invalid protonet version: " << version; } log_info << "protonet " << backend << " version " << version; if (backend == "asio") return new AsioProtonet(conf, version); gu_throw_fatal << Conf::ProtonetBackend << " '" << backend << "' not supported"; throw; return 0; // keep compiler happy } galera-4-26.4.25/gcomm/src/datagram.cpp000644 000164 177776 00000005616 15107057155 020701 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2013 Codership Oy */ #include "gcomm/datagram.hpp" #include "gu_crc.hpp" // CRC-32C - optimized and potentially accelerated #include "gu_logger.hpp" #include "gu_throw.hpp" #include // CRC32 - backward compatible gcomm::NetHeader::checksum_t gcomm::NetHeader::checksum_type (int i) { switch(i) { case CS_NONE: log_info << "Message checksums disabled."; return CS_NONE; case CS_CRC32: log_info << "Using CRC-32 (backward-compatible) for message checksums."; return CS_CRC32; case CS_CRC32C: log_info << "Using CRC-32C for message checksums."; return CS_CRC32C; } log_warn << "Ignoring unknown checksum type: " << i << ". 
Falling back to CRC-32."; return CS_CRC32; } uint16_t gcomm::crc16(const gcomm::Datagram& dg, size_t offset) { assert(offset < dg.len()); gu::byte_t lenb[4]; gu::serialize4(static_cast(dg.len() - offset), lenb, sizeof(lenb), 0); boost::crc_16_type crc; crc.process_block(lenb, lenb + sizeof(lenb)); if (offset < dg.header_len()) { crc.process_block(dg.header_ + dg.header_offset_ + offset, dg.header_ + dg.header_size_); offset = 0; } else { offset -= dg.header_len(); } crc.process_block(dg.payload_->data() + offset, dg.payload_->data() + dg.payload_->size()); return crc.checksum(); } uint32_t gcomm::crc32(gcomm::NetHeader::checksum_t const type, const gcomm::Datagram& dg, size_t offset) { gu::byte_t lenb[4]; gu::serialize4(static_cast(dg.len() - offset), lenb, sizeof(lenb), 0); if (NetHeader::CS_CRC32 == type) { boost::crc_32_type crc; crc.process_block(lenb, lenb + sizeof(lenb)); if (offset < dg.header_len()) { crc.process_block(dg.header_ + dg.header_offset_ + offset, dg.header_ + dg.header_size_); offset = 0; } else { offset -= dg.header_len(); } crc.process_block(dg.payload_->data() + offset, dg.payload_->data() + dg.payload_->size()); return crc.checksum(); } else if (NetHeader::CS_CRC32C == type) { gu::CRC32C crc; crc.append (lenb, sizeof(lenb)); if (offset < dg.header_len()) { crc.append (dg.header_ + dg.header_offset_ + offset, dg.header_size_ - dg.header_offset_ - offset); offset = 0; } else { offset -= dg.header_len(); } crc.append (dg.payload_->data() + offset, dg.payload_->size() - offset); return crc(); } gu_throw_error(EINVAL) << "Unsupported checksum algorithm: " << type; } galera-4-26.4.25/gcomm/src/evs_consensus.cpp000644 000164 177776 00000042730 15107057155 022014 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ #include "evs_consensus.hpp" #include "evs_message2.hpp" #include "evs_input_map2.hpp" #include "evs_node.hpp" #include "evs_proto.hpp" #include "gcomm/view.hpp" #include "gu_logger.hpp" #include // Disable 
debug logging until debug mask is available here #define evs_log_debug(i) if ((proto_.debug_mask_ & gcomm::evs::Proto::D_CONSENSUS) == 0) \ {} else log_debug << proto_.uuid() << " " // // Helpers // class LeaveSeqCmpOp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { using gcomm::evs::MessageNode; using gcomm::evs::MessageNodeList; const MessageNode& aval(MessageNodeList::value(a)); const MessageNode& bval(MessageNodeList::value(b)); gcomm_assert(aval.leaving() != false && bval.leaving() != false); const gcomm::evs::seqno_t asec(aval.leave_seq()); const gcomm::evs::seqno_t bsec(bval.leave_seq()); gcomm_assert(asec != -1 && bsec != -1); return (asec < bsec); } }; class RangeLuCmp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { return (gcomm::evs::MessageNodeList::value(a).im_range().lu() < gcomm::evs::MessageNodeList::value(b).im_range().lu()); } }; class SafeSeqCmp { public: bool operator()(const gcomm::evs::MessageNodeList::value_type& a, const gcomm::evs::MessageNodeList::value_type& b) const { return (gcomm::evs::MessageNodeList::value(a).safe_seq() < gcomm::evs::MessageNodeList::value(b).safe_seq()); } }; // // // bool gcomm::evs::Consensus::equal(const Message& m1, const Message& m2) const { gcomm_assert(m1.type() == Message::EVS_T_JOIN || m1.type() == Message::EVS_T_INSTALL); gcomm_assert(m2.type() == Message::EVS_T_JOIN || m2.type() == Message::EVS_T_INSTALL); // Seq and aru seq are comparable only if coming from same view if (m1.source_view_id() == m2.source_view_id()) { if (m1.seq() != m2.seq()) { evs_log_debug(D_CONSENSUS) << "seq not equal " << m1.seq() << " " << m2.seq(); return false; } if (m1.aru_seq() != m2.aru_seq()) { evs_log_debug(D_CONSENSUS) << "aruseq not equal " << m1.aru_seq() << " " << m2.aru_seq(); return false; } } MessageNodeList nl1, nl2; // When comparing messages from 
same source whole node list is comparable, // otherwise only operational part of it. if (m1.source() == m2.source()) { for_each(m1.node_list().begin(), m1.node_list().end(), SelectNodesOp(nl1, m1.source_view_id(), true, true)); for_each(m2.node_list().begin(), m2.node_list().end(), SelectNodesOp(nl2, m2.source_view_id(), true, true)); } else { for_each(m1.node_list().begin(), m1.node_list().end(), SelectNodesOp(nl1, ViewId(), true, false)); for_each(m2.node_list().begin(), m2.node_list().end(), SelectNodesOp(nl2, ViewId(), true, false)); } evs_log_debug(D_CONSENSUS) << "nl1: " << nl1 << " nl2: " << nl2; return (nl1 == nl2); } gcomm::evs::seqno_t gcomm::evs::Consensus::highest_reachable_safe_seq() const { std::vector seq_list; seq_list.reserve(known_.size()); for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); const JoinMessage* jm(node.join_message()); const LeaveMessage* lm(node.leave_message()); if ((jm == 0 && current_view_.is_member(NodeMap::key(i)) == true) || (jm != 0 && jm->source_view_id() == current_view_.id()) || (lm != 0 && lm->source_view_id() == current_view_.id())) { if (lm != 0) { if (proto_.is_all_suspected(uuid) == false) { seq_list.push_back(lm->seq()); } } else if (node.operational() == false) { seq_list.push_back( std::min( input_map_.safe_seq(node.index()), input_map_.range(node.index()).lu() - 1)); } else { seq_list.push_back(input_map_.range(node.index()).hs()); } } } return *std::min_element(seq_list.begin(), seq_list.end()); } gcomm::evs::seqno_t gcomm::evs::Consensus::safe_seq_wo_all_susupected_leaving_nodes() const { seqno_t safe_seq(-2); for(NodeMap::const_iterator i = proto_.known_.begin(); i != proto_.known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (node.index() != std::numeric_limits::max()) { if (node.operational() == false && node.leave_message() && proto_.is_all_suspected(uuid)) { continue; 
} seqno_t ss = input_map_.safe_seq(node.index()); if (safe_seq == -2 || ss < safe_seq) { safe_seq = ss; } } } return safe_seq; } namespace gcomm { namespace evs { class FilterAllSuspectedOp { public: FilterAllSuspectedOp(MessageNodeList& nl, const Proto& proto) : nl_(nl), proto_(proto) {} void operator()(const MessageNodeList::value_type& vt) const { const UUID& uuid(MessageNodeList::key(vt)); if (!proto_.is_all_suspected(uuid)) { nl_.insert_unique(vt); } } private: MessageNodeList& nl_; const Proto& proto_; }; } // evs } // gcomm bool gcomm::evs::Consensus::is_consistent_highest_reachable_safe_seq( const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); const MessageNodeList& node_list(msg.node_list()); // Same view MessageNodeList same_view; for_each(node_list.begin(), node_list.end(), SelectNodesOp(same_view, current_view_.id(), true, false)); MessageNodeList::const_iterator max_hs_i(max_element(same_view.begin(), same_view.end(), RangeHsCmp())); gcomm_assert(max_hs_i != same_view.end()); // Max highest seen const seqno_t max_hs( MessageNodeList::value(max_hs_i).im_range().hs()); seqno_t max_reachable_safe_seq(max_hs); // Leaving Nodes MessageNodeList t_leaving; for_each(node_list.begin(), node_list.end(), SelectNodesOp(t_leaving, current_view_.id(), false, true)); MessageNodeList leaving; for_each(t_leaving.begin(), t_leaving.end(), FilterAllSuspectedOp(leaving, proto_)); if (leaving.empty() == false) { const MessageNodeList::const_iterator min_leave_seq_i( std::min_element(leaving.begin(), leaving.end(), LeaveSeqCmpOp())); gcomm_assert(min_leave_seq_i != leaving.end()); const seqno_t min_leave_seq( MessageNodeList::value(min_leave_seq_i).leave_seq()); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_leave_seq); } // Partitioning nodes MessageNodeList partitioning; for_each(node_list.begin(), node_list.end(), 
SelectNodesOp(partitioning, current_view_.id(), false, false)); if (partitioning.empty() == false) { MessageNodeList::const_iterator min_part_safe_seq_i( std::min_element(partitioning.begin(), partitioning.end(), SafeSeqCmp())); gcomm_assert(min_part_safe_seq_i != partitioning.end()); const seqno_t min_part_safe_seq( MessageNodeList::value(min_part_safe_seq_i).safe_seq()); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_part_safe_seq); MessageNodeList::const_iterator min_part_lu_i( std::min_element(partitioning.begin(), partitioning.end(), RangeLuCmp())); gcomm_assert(min_part_lu_i != partitioning.end()); const seqno_t min_part_lu(MessageNodeList::value(min_part_lu_i).im_range().lu() - 1); max_reachable_safe_seq = std::min(max_reachable_safe_seq, min_part_lu); } evs_log_debug(D_CONSENSUS) << " max reachable safe seq " << max_reachable_safe_seq << " highest reachable safe seq " << highest_reachable_safe_seq() << " max_hs " << max_hs << " input map max hs " << input_map_.max_hs() << " input map safe_seq " << input_map_.safe_seq() << " safe seq wo suspected leaving nodes " << safe_seq_wo_all_susupected_leaving_nodes(); return (input_map_.max_hs() == max_hs && highest_reachable_safe_seq() == max_reachable_safe_seq && // input_map_.safe_seq() == max_reachable_safe_seq); safe_seq_wo_all_susupected_leaving_nodes() == max_reachable_safe_seq); } bool gcomm::evs::Consensus::is_consistent_input_map(const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); if (msg.aru_seq() != input_map_.aru_seq()) { evs_log_debug(D_CONSENSUS) << "message aru seq " << msg.aru_seq() << " not consistent with input map aru seq " << input_map_.aru_seq(); return false; } if (msg.seq() != input_map_.safe_seq()) { evs_log_debug(D_CONSENSUS) << "message safe seq " << msg.seq() << " not consistent with input map safe seq " << input_map_.safe_seq(); return false; } Map 
local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (current_view_.is_member(uuid) == true) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(node.index())))); } } const MessageNodeList& m_insts(msg.node_list()); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& msg_uuid(MessageNodeList::key(i)); const MessageNode& msg_inst(MessageNodeList::value(i)); if (msg_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(msg_uuid, msg_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg_insts " << msg_insts << " local_insts " << local_insts; return (msg_insts == local_insts); } bool gcomm::evs::Consensus::is_consistent_partitioning(const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); // Compare instances that were present in the current view but are // not proceeding in the next view. 
Map local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); if (node.operational() == false && node.leave_message() == 0 && current_view_.is_member(uuid) == true) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(node.index())))); } } const MessageNodeList& m_insts = msg.node_list(); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& m_uuid(MessageNodeList::key(i)); const MessageNode& m_inst(MessageNodeList::value(i)); if (m_inst.operational() == false && m_inst.leaving() == false && m_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(m_uuid, m_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg insts:\n" << msg_insts << " local insts:\n" << local_insts; return (msg_insts == local_insts); } bool gcomm::evs::Consensus::is_consistent_leaving(const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); // Compare instances that were present in the current view but are // not proceeding in the next view. 
Map local_insts, msg_insts; for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& inst(NodeMap::value(i)); const LeaveMessage* lm(inst.leave_message()); if (inst.operational() == false && lm != 0 && lm->source_view_id() == current_view_.id()) { gu_trace((void)local_insts.insert_unique( std::make_pair(uuid, input_map_.range(inst.index())))); } } const MessageNodeList& m_insts = msg.node_list(); for (MessageNodeList::const_iterator i = m_insts.begin(); i != m_insts.end(); ++i) { const UUID& m_uuid(MessageNodeList::key(i)); const MessageNode& m_inst(MessageNodeList::value(i)); if (m_inst.operational() == false && m_inst.leaving() == true && m_inst.view_id() == current_view_.id()) { gu_trace((void)msg_insts.insert_unique( std::make_pair(m_uuid, m_inst.im_range()))); } } evs_log_debug(D_CONSENSUS) << " msg insts " << msg_insts << " local insts " << local_insts; return (local_insts == msg_insts); } bool gcomm::evs::Consensus::is_consistent_same_view(const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); gcomm_assert(msg.source_view_id() == current_view_.id()); if (is_consistent_highest_reachable_safe_seq(msg) == false) { evs_log_debug(D_CONSENSUS) << "highest reachable safe seq not consistent"; return false; } if (is_consistent_input_map(msg) == false) { evs_log_debug(D_CONSENSUS) << "input map not consistent with " << msg; return false; } if (is_consistent_partitioning(msg) == false) { evs_log_debug(D_CONSENSUS) << "partitioning not consistent with " << msg; return false; } if (is_consistent_leaving(msg) == false) { evs_log_debug(D_CONSENSUS) << "leaving not consistent with " << msg; return false; } return true; } bool gcomm::evs::Consensus::is_consistent(const Message& msg) const { gcomm_assert(msg.type() == Message::EVS_T_JOIN || msg.type() == Message::EVS_T_INSTALL); const JoinMessage* my_jm = 
NodeMap::value(known_.find_checked(proto_.uuid())).join_message(); if (my_jm == 0) { return false; } if (msg.source_view_id() == current_view_.id()) { return (is_consistent_same_view(msg) == true && equal(msg, *my_jm) == true); } else { return equal(msg, *my_jm); } } bool gcomm::evs::Consensus::is_consensus() const { const JoinMessage* my_jm = NodeMap::value(known_.find_checked(proto_.uuid())).join_message(); if (my_jm == 0) { evs_log_debug(D_CONSENSUS) << "no own join message"; return false; } if (is_consistent_same_view(*my_jm) == false) { evs_log_debug(D_CONSENSUS) << "own join message not consistent"; return false; } for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const Node& inst(NodeMap::value(i)); if (inst.operational() == true) { const JoinMessage* jm = inst.join_message(); if (jm == 0) { evs_log_debug(D_CONSENSUS) << "no join message for " << NodeMap::key(i); return false; } // call is_consistent() instead of equal() to enforce strict // check for messages originating from the same view (#541) if (is_consistent(*jm) == false) { evs_log_debug(D_CONSENSUS) << "join message " << *jm << " not consistent with my join " << *my_jm; return false; } } } return true; } galera-4-26.4.25/gcomm/src/gcomm/000755 000164 177776 00000000000 15107057160 017503 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcomm/src/gcomm/common.hpp000644 000164 177776 00000001350 15107057155 021507 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2012 Codership Oy */ /*! 
* @file common.hpp * * @brief Imports definitions from the global common.h */ #ifndef GCOMM_COMMON_HPP #define GCOMM_COMMON_HPP #if defined(HAVE_COMMON_H) #include #endif #include namespace gcomm { #if defined(HAVE_COMMON_H) static std::string const BASE_PORT_KEY(COMMON_BASE_PORT_KEY); static std::string const BASE_PORT_DEFAULT(COMMON_BASE_PORT_DEFAULT); static std::string const BASE_DIR_DEFAULT(COMMON_BASE_DIR_DEFAULT); #else static std::string const BASE_PORT_KEY("base_port"); static std::string const BASE_PORT_DEFAULT("4567"); static std::string const BASE_DIR_DEFAULT("."); #endif } #endif /* GCOMM_COMMON_HPP */ galera-4-26.4.25/gcomm/src/gcomm/util.hpp000644 000164 177776 00000005205 15107057155 021177 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2012 Codership Oy */ #ifndef _GCOMM_UTIL_HPP_ #define _GCOMM_UTIL_HPP_ #include "gcomm/datagram.hpp" #include "gu_logger.hpp" #include "gu_throw.hpp" #include namespace gcomm { inline std::string uri_string (const std::string& scheme, const std::string& addr, const std::string& port = std::string("")) { if (port.length() > 0) return (scheme + "://" + addr + ':' + port); else return (scheme + "://" + addr); } inline bool host_is_any (const std::string& host) { return (host.length() == 0 || host == "0.0.0.0" || host.find ("::/128") <= 1); } template size_t serialize(const C& c, gu::Buffer& buf) { const size_t prev_size(buf.size()); buf.resize(buf.size() + c.serial_size()); size_t ret; gu_trace(ret = c.serialize(&buf[0] + prev_size, buf.size(), prev_size)); assert(ret == prev_size + c.serial_size()); return ret; } template size_t unserialize(const gu::Buffer& buf, size_t offset, C& c) { size_t ret; gu_trace(ret = c.unserialize(buf, buf.size(), offset)); return ret; } template void push_header(const M& msg, Datagram& dg) { if (dg.header_offset() < msg.serial_size()) { gu_throw_fatal; } msg.serialize(dg.header(), dg.header_size(), dg.header_offset() - msg.serial_size()); dg.set_header_offset(dg.header_offset() - 
msg.serial_size()); } template void pop_header(const M& msg, Datagram& dg) { assert(dg.header_size() >= dg.header_offset() + msg.serial_size()); dg.set_header_offset(dg.header_offset() + msg.serial_size()); } inline const gu::byte_t* begin(const Datagram& dg) { return (dg.offset() < dg.header_len() ? dg.header() + dg.header_offset() + dg.offset() : dg.payload().data() + (dg.offset() - dg.header_len())); } inline size_t available(const Datagram& dg) { return (dg.offset() < dg.header_len() ? dg.header_len() - dg.offset() : dg.payload().size() - (dg.offset() - dg.header_len())); } template class Critical { public: Critical(M& monitor) : monitor_(monitor) { monitor_.enter(); } ~Critical() { monitor_.leave(); } private: M& monitor_; }; } // namespace gcomm #endif // _GCOMM_UTIL_HPP_ galera-4-26.4.25/gcomm/src/gcomm/uuid.hpp000644 000164 177776 00000005517 15107057155 021176 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #ifndef _GCOMM_UUID_HPP_ #define _GCOMM_UUID_HPP_ #include "gcomm/exception.hpp" #include "gcomm/types.hpp" #include "gu_utils.hpp" #include "gu_assert.hpp" #include "gu_byteswap.h" #include "gu_uuid.hpp" #include #include namespace gcomm { class UUID; std::ostream& operator<<(std::ostream&, const UUID&); } class gcomm::UUID : public gu::UUID_base { public: UUID() : gu::UUID_base() {} UUID(const void* node, const size_t node_len) : gu::UUID_base(node, node_len) {} UUID(const int32_t idx) : gu::UUID_base() { assert(idx > 0); memcpy(&uuid_, &idx, sizeof(idx)); } static const UUID& nil() { return uuid_nil_; } // Print UUID to stream. If the full equals to true, // whole UUID string is printed. 
If it is false, the // first 4 bytes and 2 bytes matching to incarnation // number are printed in form of // - std::ostream& to_stream(std::ostream& os, bool full) const { std::ios_base::fmtflags saved = os.flags(); if (full == true) { os << uuid_; } else { os << std::hex << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[0]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[1]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[2]) << std::setfill('0') << std::setw(2) << static_cast(uuid_.data[3]) << "-" << std::setfill('0') << std::setw(4) << get_incarnation(); } os.flags(saved); return os; } // Prefer the above function over this one std::string full_str() const { std::ostringstream os; to_stream(os, true); return os.str(); } // Return incarnation number. uint16_t get_incarnation() const { const uint16_t* data = reinterpret_cast(uuid_.data); return gu_be16(data[4]); } void increment_incarnation() { uint16_t* data = reinterpret_cast(uuid_.data); uint16_t inc = gu_be16(data[4]); inc++; data[4] = gu_be16(inc); } bool fixed_part_matches(const gcomm::UUID& uuid) const { return ((memcmp(uuid_.data, uuid.ptr()->data, 8) == 0) && (memcmp(uuid_.data+10, uuid.ptr()->data+10, 6) == 0)); } private: static const UUID uuid_nil_; UUID(gu_uuid_t uuid) : gu::UUID_base(uuid) {} }; inline std::ostream& gcomm::operator<<(std::ostream& os, const UUID& uuid) { return uuid.to_stream (os, false); } #endif // _GCOMM_UUID_HPP_ galera-4-26.4.25/gcomm/src/gcomm/protonet.hpp000644 000164 177776 00000005556 15107057155 022105 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2009 Codership Oy // //! // @file protonet.hpp // // This file defines protonet interface used by gcomm. 
// #ifndef GCOMM_PROTONET_HPP #define GCOMM_PROTONET_HPP #include "gu_uri.hpp" #include "gu_datetime.hpp" #include "protostack.hpp" #include "gu_config.hpp" #include "socket.hpp" #include #include #ifndef GCOMM_PROTONET_MAX_VERSION #define GCOMM_PROTONET_MAX_VERSION 0 #endif // GCOMM_PROTONET_MAX_VERSION namespace gcomm { // Forward declarations class Protonet; } //! // Abstract Protonet interface class // class gcomm::Protonet { public: Protonet(gu::Config& conf, const std::string& type, int version) : protos_ (), version_(version), conf_ (conf), type_ (type) { } virtual ~Protonet() { } //! // Insert Protostack to be handled by Protonet // // @param pstack Pointer to Protostack // void insert(Protostack* pstack); //! // Erase Protostack from Protonet to stop dispatching events // to Protostack // // @param pstack Pointer to Protostack // void erase(Protostack* pstack); //! // Create new Socket // // @param uri URI to specify Socket type // // @return Socket // virtual gcomm::SocketPtr socket(const gu::URI& uri) = 0; //! // Create new Acceptor // // @param uri URI to specify Acceptor type // // @return Acceptor // virtual std::shared_ptr acceptor(const gu::URI& uri) = 0; //! // Dispatch events until period p has passed or event // loop is interrupted. // // @param p Period to run event_loop(), negative value means forever // // @return Number of events processed, excluding the timer event used to // interrupt the loop // virtual size_t event_loop(const gu::datetime::Period& p) = 0; //! // Iterate over Protostacks and handle timers // // @return Time of next known timer expiration // gu::datetime::Date handle_timers(); //! // Interrupt event loop // virtual void interrupt() = 0; //! // Enter Protonet critical section // virtual void enter() = 0; //! // Leave Protonet critical section // virtual void leave() = 0; bool set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb); gu::Config& conf() { return conf_; } //! 
// Factory method for creating Protonets // static Protonet* create(gu::Config& conf); const std::string& type() const { return type_; } virtual size_t mtu() const = 0; protected: std::deque protos_; int version_; static const int max_version_ = GCOMM_PROTONET_MAX_VERSION; gu::Config& conf_; private: std::string type_; }; #endif // GCOMM_PROTONET_HPP galera-4-26.4.25/gcomm/src/gcomm/map.hpp000644 000164 177776 00000017252 15107057155 021004 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ /*! * @file map.hpp * * This file contains templates that are thin wrappers for std::map * and std::multimap with some extra functionality. */ #ifndef GCOMM_MAP_HPP #define GCOMM_MAP_HPP #include "gu_serialize.hpp" #include #include #include #include "gcomm/exception.hpp" #include "gcomm/types.hpp" namespace gcomm { template class MapBase { typedef C MapType; public: typedef typename MapType::iterator iterator; typedef typename MapType::const_iterator const_iterator; typedef typename MapType::reverse_iterator reverse_iterator; typedef typename MapType::const_reverse_iterator const_reverse_iterator; typedef typename MapType::value_type value_type; typedef typename MapType::const_reference const_reference; typedef typename MapType::key_type key_type; typedef typename MapType::mapped_type mapped_type; protected: MapType map_; public: MapBase() : map_() {} virtual ~MapBase() {} iterator begin() { return map_.begin(); } iterator end() { return map_.end(); } iterator find(const K& k) { return map_.find(k); } iterator find_checked(const K& k) { iterator ret = map_.find(k); if (ret == map_.end()) { gu_throw_fatal << "element " << k << " not found"; } return ret; } iterator lower_bound(const K& k) { return map_.lower_bound(k); } const_iterator begin() const { return map_.begin(); } const_iterator end() const { return map_.end(); } const_reverse_iterator rbegin() const { return map_.rbegin(); } const_reverse_iterator rend() const { return map_.rend(); } 
const_iterator find(const K& k) const { return map_.find(k); } const_iterator find_checked(const K& k) const { const_iterator ret = map_.find(k); if (ret == map_.end()) { gu_throw_fatal << "element " << k << " not found"; } return ret; } mapped_type& operator[](const key_type& k) { return map_[k]; } void erase(iterator i) { map_.erase(i); } void erase(iterator i, iterator j) { map_.erase(i, j); } void erase(const K& k) { map_.erase(k); } void clear() { map_.clear(); } size_t size() const { return map_.size(); } bool empty() const { return map_.empty(); } size_t serialize(gu::byte_t* const buf, size_t const buflen, size_t offset) const { gu_trace(offset = gu::serialize4( static_cast(size()), buf, buflen, offset)); for (const_iterator i = map_.begin(); i != map_.end(); ++i) { gu_trace(offset = key(i).serialize(buf, buflen, offset)); gu_trace(offset = value(i).serialize(buf, buflen, offset)); } return offset; } size_t unserialize(const gu::byte_t* buf, size_t const buflen, size_t offset) { uint32_t len; // Clear map in case this object is reused map_.clear(); gu_trace(offset = gu::unserialize4(buf, buflen, offset, len));; for (uint32_t i = 0; i < len; ++i) { K k; V v; gu_trace(offset = k.unserialize(buf, buflen, offset)); gu_trace(offset = v.unserialize(buf, buflen, offset)); if (map_.insert(std::make_pair(k, v)).second == false) { gu_throw_fatal << "Failed to unserialize map"; } } return offset; } size_t serial_size() const { return sizeof(uint32_t) + size()*(K::serial_size() + V::serial_size()); } bool operator==(const MapBase& other) const { return (map_ == other.map_); } bool operator!=(const MapBase& other) const { return !(map_ == other.map_); } static const K& key(const_iterator i) { return i->first; } static const K& key(iterator i) { return i->first; } static const V& value(const_iterator i) { return i->second; } static V& value(iterator i) { return i->second; } static const K& key(const value_type& vt) { return vt.first; } static V& value(value_type& vt) { 
return vt.second; } static const V& value(const value_type& vt) { return vt.second; } }; // @todo For some reason map key must be declared in gcomm namespace // in order this to work. Find out the reason why and fix. template std::ostream& operator<<(std::ostream& os, const std::pair& p) { return (os << "\t" << p.first << "," << p.second << "\n"); } template std::ostream& operator<<(std::ostream& os, const MapBase& map) { std::copy(map.begin(), map.end(), std::ostream_iterator >(os, "")); return os; } template > class Map : public MapBase { public: typedef typename MapBase::iterator iterator; std::pair insert(const std::pair& p) { return MapBase::map_.insert(p); } iterator insert(iterator pos, const std::pair& p) { return MapBase::map_.insert(pos, p); } template void insert(InputIterator first, InputIterator last) { MapBase::map_.insert(first, last); } iterator insert_unique(const typename MapBase::value_type& p) { std::pair ret = MapBase::map_.insert(p); if (false == ret.second) { gu_throw_fatal << "duplicate entry " << "key=" << MapBase::key(p) << " " << "value=" << MapBase::value(p) << " " << "map=" << *this; } return ret.first; } }; template > class MultiMap : public MapBase { public: typedef typename MapBase::iterator iterator; typedef typename MapBase::const_iterator const_iterator; typedef typename MapBase::value_type value_type; typedef typename MapBase::const_reference const_reference; iterator insert(const std::pair& p) { return MapBase::map_.insert(p); } iterator insert(iterator position, const value_type& vt) { return MapBase::map_.insert(position, vt); } std::pair equal_range(const K& k) const { return MapBase::map_.equal_range(k); } }; } #endif /* GCOMM_MAP_HPP */ galera-4-26.4.25/gcomm/src/gcomm/conf.hpp000644 000164 177776 00000047533 15107057155 021161 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ /*! * @file conf.hpp * * @brief Configuration parameters and utility templates. 
*/ #ifndef GCOMM_CONF_HPP #define GCOMM_CONF_HPP #include "gu_config.hpp" #include "gu_uri.hpp" #include "gu_throw.hpp" namespace gcomm { /*! * Configuration parameter definitions. * * Transport definition and configuration parameters are passed to * Transport::create() in the URI form. URI scheme part defines * which transport is returned. Currently recognized are "tcp", "gmcast" * and "pc". This will change in the future. * * URI format is the following: * gcomm://[[:]][?=&=]... * The key/value pairs can be used to pass configuration parameters to * gcomm layers. * * Time periods as parameter values follow ISO8601 duration representation * (as represented in http://en.wikipedia.org/wiki/ISO_8601#Durations). * Examples: * - PT1S - one second * - PT1M30S = one minute 30 secs * - P1DT6H = one day, 6 hours * * To get subsecond resolution, second part can be represented as decimal * number, but currently it is not recommended due to bug in Period * parsing routine (rounding errors can result inaccurate boundary * value checking). */ struct Conf { static std::string const ProtonetBackend; static std::string const ProtonetVersion; /*! * @brief TCP non-blocking flag ("socket.non_blocking") * * Parameter value is boolean (passed 0 or 1) denoting whether * the socket should or should not be in non-blocking state. */ static std::string const TcpNonBlocking; /*! * @brief Algorithm for message checksums: * 0 - none (backward compatible) * 1 - CRC-32 (backward compatible) * 2 - CRC-32C (optimized and potentially HW-accelerated on Intel CPUs) */ static std::string const SocketChecksum; /*! * @brief Socket receive buffer size in bytes */ static std::string const SocketRecvBufSize; /*! * @brief Socket send buffer size in bytes. */ static std::string const SocketSendBufSize; /*! * @brief GMCast scheme for transport URI ("gmcast") */ static std::string const GMCastScheme; /*! * @brief GMCast protocol version */ static std::string const GMCastVersion; /*! 
* @brief GMCast group name ("gmcast.group") * * String denoting group name. Max length of string is 16. Peer nodes * accept GMCast connection only if the group names match. */ static std::string const GMCastGroup; /*! * @brief GMCast listening address ("gmcast.listen_addr") * * Listening address for GMCast. Address is currently passed in * URI format (for example tcp://192.168.3.1:4567) and it should * be passed as the last configuration parameter in order to * avoid confusion. If parameter value is undefined, GMCast * starts listening all interfaces at default port 4567. */ static std::string const GMCastListenAddr; /*! * @brief GMCast multicast address ("gmcast.mcast_addr") * * Multicast address for GMCast. By default multicast socket * is bound to the same interface as conf::GMCastListenAddr. * If multicast interface must be specified, the only way * to do it is currently via listening address configuration. */ static std::string const GMCastMCastAddr; /*! * @brief GMCast multicast port ("gmcast.mcast_port") * * Multicast port for GMCast. By default multicast uses the * same port as GMCast TCP connections. */ static std::string const GMCastMCastPort; /*! * @brief GMCast multicast TTL ("gmcast.mcast_ttl") * * This parameter controls multicast packet TTL. By default it * is set to 1 and usually it should not be changed unless * advised so. This means that multicast is limited to single LAN * segment. */ static std::string const GMCastMCastTTL; static std::string const GMCastTimeWait; static std::string const GMCastPeerTimeout; /*! * @brief Maximum initial reconnect attempts * * Maximum initial reconnect attempts for address reported by peer. */ static std::string const GMCastMaxInitialReconnectAttempts; /*! * @brief Add or remove peer address. * * Setting value to add:://: will inject new peer * address in address list. Setting value to del:://: * will remove peer address from list (via forget procedure). */ static std::string const GMCastPeerAddr; /*! 
* @brief Isolate node from peers * * Setting this value to 'true' closes all connections * and will prevent forming of new connections until * value is set again to 'false'. This parameter should be * used for testing purposes only and it will not be visible * in global configuration array. */ static std::string const GMCastIsolate; /*! * @brief Segment identifier for segmentation. */ static std::string const GMCastSegment; /*! * @brief EVS scheme for transport URI ("evs") */ static std::string const EvsScheme; /*! * @brief EVS protocol version */ static std::string const EvsVersion; /*! * @brief EVS view forget timeout ("evs.view_forget_timeout") * * This timeout controls how long information about * known group views is maintained. This information is needed * to filter out delayed messages from previous views that are not * live anymore. Default value is 5 minutes and there is usually not * need to change it. */ static std::string const EvsViewForgetTimeout; /*! * @brief EVS suspect timeout ("evs.suspect_timeout") * * This timeout controls how long node can remain silent until * it is put under suspicion. If majority of the current group * agree that the node is under suspicion, it is discarded from * group and new group view is formed immediately. If majority * of the group does not agree about suspicion, Conf::EvsInactiveTimeout * is waited until forming of new group will be attempted. * Default value is 5 seconds. */ static std::string const EvsSuspectTimeout; /*! * @brief EVS inactive timeout ("evs.inactive_timeout") * * This timeout control how long node can remain completely silent * until it is discarded from the group. This is hard limit, unlike * Conf::EvsSuspectTimeout, and the node is discarded even if it * becomes live during the formation of the new group. Default value * is 15 seconds. */ static std::string const EvsInactiveTimeout; /*! 
* @brief EVS inactive check period ("evs.inactive_check_period") * * This period controls how often node liveness is checked. Default * is 1 second and there is no need to change this unless * Conf::EvsSuspectTimeout or Conf::EvsInactiveTimeout is adjusted * to smaller value. Default value is 1 second, minimum is 0.1 seconds * and maximum is Conf::EvsSuspectTimeout/2. */ static std::string const EvsInactiveCheckPeriod; static std::string const EvsInstallTimeout; /*! * @brief EVS keepalive period ("evs.keepalive_period") * * This timeout controls how often keepalive messages are * sent into network. Node liveness is determined with * these keepalives, so the value sould be significantly smaller * than Conf::EvsSuspectTimeout. Default value is 1 second, * minimum is 0.1 seconds and maximum is Conf::EvsSuspectTimeout/3. */ static std::string const EvsKeepalivePeriod; /*! * @brief EVS join retransmission period ("evs.join_retrans_period") * * This parameter controls how often join messages are retransmitted * during group formation. There is usually no need to adjust * this value. Default value is 0.3 seconds, minimum is 0.1 seconds * and maximum is Conf::EvsSuspectTimeout/3. */ static std::string const EvsJoinRetransPeriod; /*! * @brief EVS statistics reporting period ("evs.stats_report_period") * * This parameters controls how often statistics information is * printed in the log. This parameter has effect only if * statistics reporting is enabled via Conf::EvsInfoLogMask. Default * value is 1 minute. */ static std::string const EvsStatsReportPeriod; /*! * @brief EVS debug log mask ("evs.debug_log_mask") * * This mask controls what debug information is printed in the logs * if debug logging is turned on. Mask value is bitwise-or * from values gcomm::evs::Proto::DebugFlags. By default only * state information is printed. */ static std::string const EvsDebugLogMask; /*! 
* @brief EVS info log mask ("evs.info_log_mask") * * This mask controls what info log is printed in the logs. * Mask value is bitwise-or from values gcomm::evs::Proto::InfoFlags. */ static std::string const EvsInfoLogMask; /*! * @brief EVS send window ("evs.send_window") * * This parameter controls how many messages protocol layer is * allowed to send without getting all acknowledgements for any of them. * Default value is 32. */ static std::string const EvsSendWindow; /*! * @brief EVS user send window ("evs.user_send_window") * * Like Conf::EvsSendWindow, but for messages for which sending * is initiated by call from upper layer. Default value is 16. */ static std::string const EvsUserSendWindow; /*! * @brief EVS message aggregation mode ("evs.use_aggregate") * * This parameter controls whether EVS is allowed to aggregate * several user messages into one message. By default this option * is enabled and there should be no need to disable it unless * advised so. */ static std::string const EvsUseAggregate; /*! * @brief Period to generate keepalives for causal messages * */ static std::string const EvsCausalKeepalivePeriod; /*! * @brief EVS maximum install timeouts ("evs.max_install_timeouts") * * This parameter controls how many install attempts are done * before declaring other nodes as inactive and trying to re-establish * group via singleton views. */ static std::string const EvsMaxInstallTimeouts; /*! * @brief Margin over keepalive period after which node is declared * delayed. This should be greater than the largest RTT * between cluster nodes. */ static std::string const EvsDelayMargin; /*! * @brief Period which determines how long delayed node is kept in * delayed list after it becomes responsive again. * * The actual time that node stays in delayed list is * EvsDelayedKeepPeriod times the number of changes between * OK and DELAYED state. */ static std::string const EvsDelayedKeepPeriod; /*! 
* @brief List of nodes (UUIDs) that should be evicted permanently from * cluster. * * Setting value to nil UUID will clear the evict list. */ static std::string const EvsEvict; /*! * @brief Autoevict threshold. */ static std::string const EvsAutoEvict; /*! * @brief PC scheme for transport URI ("pc") */ static std::string const PcScheme; /*! * @brief PC protocol version */ static std::string const PcVersion; /*! * @brief PC split-brain mode * * This parameter controls whether PC is allowed to continue * operation despite of possible split brain condition. */ static std::string const PcIgnoreSb; /*! * @brief PC quorum mode * * This parameter controls whether PC is allowed to continue * operation despite of quorum loss. */ static std::string const PcIgnoreQuorum; /*! * @brief PC message checksumming * * This parameter controls whether PC layer does message * checksumming. */ static std::string const PcChecksum; /*! * @brief PC starup announce timeout */ static std::string const PcAnnounceTimeout; /*! * @brief PC close linger timeout */ static std::string const PcLinger; /*! * @brief PC newer prim view overrides */ static std::string const PcNpvo; /*! * @brief If set during runtime bootstraps new PC */ static std::string const PcBootstrap; /*! * @brief Wait for prim comp unconditionally if set to true */ static std::string const PcWaitPrim; /*! * @brief Timeout on waiting for primary component */ static std::string const PcWaitPrimTimeout; /*! * @brief Node weight in prim comp voting */ static std::string const PcWeight; /*! 
* @brief PC recovery from cluster crash */ static std::string const PcRecovery; static void register_params(gu::Config&); static void check_params(const gu::Config&); struct Check { Check(gu::Config& conf) { check_params(conf); } virtual ~Check() {} // to pacify older GCCs with -Werror=effc++ }; static size_t check_recv_buf_size(const std::string& val); static size_t check_send_buf_size(const std::string& val); }; // Helper templates to read configuration parameters. template T _conf_param(const gu::URI& uri, const std::string& param, const T* default_value = 0, const T* min_value = 0, const T* max_value = 0) { T ret; try { ret = gu::from_string(uri.get_option(param)); } catch (gu::NotFound& e) { // cppcheck-suppress nullPointer if (default_value == 0) { gu_throw_error(EINVAL) << "param " << param << " not found from uri " << uri.to_string(); } // cppcheck-suppress nullPointer ret = *default_value; } if (min_value != 0 && *min_value > ret) { gu_throw_error(EINVAL) << "param " << param << " value " << ret << " out of range " << "min allowed " << *min_value; } if (max_value != 0 && *max_value < ret) { gu_throw_error(EINVAL) << "param " << param << " value " << ret << " out of range " << "max allowed " << *max_value; } return ret; } template T conf_param(const gu::URI& uri, const std::string& param) { return _conf_param(uri, param, 0, 0, 0); } template T conf_param_def(const gu::URI& uri, const std::string& param, const T& default_value) { return _conf_param(uri, param, &default_value); } template T conf_param_range(const gu::URI& uri, const std::string& param, const T& min_value, const T& max_value) { return _conf_param(uri, param, 0, &min_value, &max_value); } template T conf_param_def_min(const gu::URI& uri, const std::string& param, const T& default_value, const T& min_value) { return _conf_param(uri, param, &default_value, &min_value); } template T conf_param_def_max(const gu::URI& uri, const std::string& param, const T& default_value, const T& max_value) { 
return _conf_param(uri, param, &default_value, reinterpret_cast(0), &max_value); } template T conf_param_def_range(const gu::URI& uri, const std::string& param, const T& default_value, const T& min_value, const T& max_value) { return _conf_param(uri, param, &default_value, &min_value, &max_value); } template T param(gu::Config& conf, const gu::URI& uri, const std::string& key, const std::string& def, std::ios_base& (*f)(std::ios_base&) = std::dec) { T ret; try { std::string cnf(conf.get(key, def)); std::string val(uri.get_option(key, cnf)); try { ret = gu::from_string(val, f); } catch (gu::NotFound) { gu_throw_error(EINVAL) << "Bad value '" << val << "' for parameter '" << key << "'"; } } catch (gu::NotFound) { gu_throw_error(EINVAL) << "Unrecognized parameter '" << key << "'"; } return ret; } template T check_range(const std::string& key, const T& val, const T& min, const T& max) { if (val < min || val >= max) { gu_throw_error(ERANGE) << "parameter '" << key << "' value " << val << " is out of range [" << min << "," << max << ")"; } return val; } template T check_range(const std::string& key, const std::string& val, const T& min, const T& max) { return check_range(key, gu::Config::from_config(val), min, max); } template T check_range(const gu::Config& conf, const std::string& key, const T& min, const T& max) { return check_range(key, conf.get(key), min, max); } } // namespace gcomm #endif // GCOMM_CONF_HPP galera-4-26.4.25/gcomm/src/gcomm/datagram.hpp000644 000164 177776 00000021700 15107057155 022000 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2013 Codership Oy */ #ifndef GCOMM_DATAGRAM_HPP #define GCOMM_DATAGRAM_HPP #include "gu_buffer.hpp" #include "gu_serialize.hpp" #include "gu_utils.hpp" #include "gu_throw.hpp" #include #include #include #include #include namespace gcomm { //! 
// @class NetHeader // // @brief Header for datagrams sent over network // // Header structure is the following (MSB first) // // | version(4) | reserved(2) | F_CRC(2) | len(24) | // | CRC(32) | // class NetHeader { public: typedef enum checksum { CS_NONE = 0, CS_CRC32, CS_CRC32C } checksum_t; static checksum_t checksum_type (int i); NetHeader() : len_(), crc32_() { } NetHeader(uint32_t len, int version) : len_(len), crc32_(0) { if (len > len_mask_) gu_throw_error(EINVAL) << "msg too long " << len_; len_ |= (static_cast(version) << version_shift_); } uint32_t len() const { return (len_ & len_mask_); } void set_crc32(uint32_t crc32, checksum_t type) { assert (CS_CRC32 == type || CS_CRC32C == type); crc32_ = crc32; CS_CRC32 == type ? len_ |= F_CRC32 : len_ |= F_CRC32C; } bool has_crc32() const { return (len_ & F_CRC32); } bool has_crc32c() const { return (len_ & F_CRC32C); } uint32_t crc32() const { return crc32_; } int version() const { return ((len_ & version_mask_) >> version_shift_); } friend size_t serialize(const NetHeader& hdr, gu::byte_t* buf, size_t buflen, size_t offset); friend size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, NetHeader& hdr); friend size_t serial_size(const NetHeader& hdr); static const size_t serial_size_ = 8; private: static const uint32_t len_mask_ = 0x00ffffff; static const uint32_t flags_mask_ = 0x0f000000; static const uint32_t flags_shift_ = 24; static const uint32_t version_mask_ = 0xf0000000; static const uint32_t version_shift_ = 28; static const uint32_t F_CRC32 = 1 << 24; /* backward compatible */ static const uint32_t F_CRC32C = 1 << 25; uint32_t len_; uint32_t crc32_; }; inline size_t serialize(const NetHeader& hdr, gu::byte_t* buf, size_t buflen, size_t offset) { offset = gu::serialize4(hdr.len_, buf, buflen, offset); offset = gu::serialize4(hdr.crc32_, buf, buflen, offset); return offset; } inline size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset, NetHeader& hdr) { offset = 
gu::unserialize4(buf, buflen, offset, hdr.len_); offset = gu::unserialize4(buf, buflen, offset, hdr.crc32_); switch (hdr.version()) { case 0: if ((hdr.len_ & NetHeader::flags_mask_) & ~(NetHeader::F_CRC32 | NetHeader::F_CRC32C)) { gu_throw_error(EPROTO) << "invalid flags " << ((hdr.len_ & NetHeader::flags_mask_) >> NetHeader::flags_shift_); } break; default: gu_throw_error(EPROTO) << "invalid protocol version " << hdr.version(); } return offset; } inline size_t serial_size(const NetHeader& hdr) { return NetHeader::serial_size_; } /*! * @brief Datagram container * * Datagram class provides consistent interface for managing * datagrams/byte buffers. */ class Datagram { public: Datagram() : header_ (), header_offset_(header_size_), payload_ (new gu::Buffer()), offset_ (0) { } /*! * @brief Construct new datagram from byte buffer * * @param[in] buf Const pointer to data buffer * @param[in] buflen Length of data buffer * * @throws std::bad_alloc */ Datagram(const gu::Buffer& buf, size_t offset = 0) : header_ (), header_offset_(header_size_), payload_ (new gu::Buffer(buf)), offset_ (offset) { assert(offset_ <= payload_->size()); } Datagram(const gu::SharedBuffer& buf, size_t offset = 0) : header_ (), header_offset_(header_size_), payload_ (buf), offset_ (offset) { assert(offset_ <= payload_->size()); } /*! * @brief Copy constructor. * * @note Only for normalized datagrams. * * @param[in] dgram Datagram to make copy from * @param[in] off * @throws std::bad_alloc */ Datagram(const Datagram& dgram, size_t off = std::numeric_limits::max()) : header_offset_(dgram.header_offset_), payload_(dgram.payload_), offset_(off == std::numeric_limits::max() ? 
dgram.offset_ : off) { assert(offset_ <= dgram.len()); memcpy(header_ + header_offset_, dgram.header_ + dgram.header_offset(), dgram.header_len()); } friend void swap(Datagram& lhs, Datagram& rhs) { using std::swap; swap(lhs.header_offset_, rhs.header_offset_); swap(lhs.header_, rhs.header_); swap(lhs.payload_, rhs.payload_); swap(lhs.offset_, rhs.offset_); } Datagram& operator=(Datagram other) { swap(*this, other); return *this; } /*! * @brief Destruct datagram */ ~Datagram() { } void normalize() { const gu::SharedBuffer old_payload(payload_); payload_ = gu::SharedBuffer(new gu::Buffer); payload_->reserve(header_len() + old_payload->size() - offset_); if (header_len() > offset_) { payload_->insert(payload_->end(), header_ + header_offset_ + offset_, header_ + header_size_); offset_ = 0; } else { offset_ -= header_len(); } header_offset_ = header_size_; payload_->insert(payload_->end(), old_payload->begin() + gu::Buffer::difference_type(offset_), old_payload->end()); offset_ = 0; } gu::byte_t* header() { return header_; } const gu::byte_t* header() const { return header_; } size_t header_size() const { return header_size_; } size_t header_len() const { return (header_size_ - header_offset_); } size_t header_offset() const { return header_offset_; } void set_header_offset(const size_t off) { // assert(off <= header_size_); if (off > header_size_) gu_throw_fatal << "out of hdrspace"; header_offset_ = off; } const gu::Buffer& payload() const { assert(payload_); return *payload_; } gu::Buffer& payload() { assert(payload_); return *payload_; } size_t len() const { return (header_size_ - header_offset_ + payload_->size()); } size_t offset() const { return offset_; } private: friend uint16_t crc16(const Datagram&, size_t); friend uint32_t crc32(NetHeader::checksum_t, const Datagram&, size_t); static const size_t header_size_ = 128; gu::byte_t header_[header_size_]; size_t header_offset_; gu::SharedBuffer payload_; size_t offset_; }; uint16_t crc16(const Datagram& dg, 
size_t offset = 0); uint32_t crc32(NetHeader::checksum_t type, const Datagram& dg, size_t offset = 0); /* returns true if checksum fails */ inline bool check_cs (const NetHeader& hdr, const Datagram& dg) { if (hdr.has_crc32c()) return (crc32(NetHeader::CS_CRC32C, dg) != hdr.crc32()); if (hdr.has_crc32()) return (crc32(NetHeader::CS_CRC32, dg) != hdr.crc32()); return (hdr.crc32() != 0); } } /* namespace gcomm */ #endif // GCOMM_DATAGRAM_HPP galera-4-26.4.25/gcomm/src/gcomm/types.hpp000644 000164 177776 00000004310 15107057155 021362 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ #ifndef _GCOMM_TYPES_HPP_ #define _GCOMM_TYPES_HPP_ #include "gcomm/exception.hpp" #include "gu_byteswap.hpp" #include "gu_buffer.hpp" #include #include #include namespace gcomm { template class String { public: String(const std::string& str = "") : str_(str) { if (str_.size() > str_size_) { gu_throw_error(EMSGSIZE); } } virtual ~String() { } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { if (buflen < offset + str_size_) { gu_throw_error (EMSGSIZE) << str_size_ << " > " << (buflen-offset); } std::string ser_str(str_); ser_str.resize(str_size_, '\0'); (void)std::copy(ser_str.data(), ser_str.data() + ser_str.size(), buf + offset); return offset + str_size_; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { if (buflen < offset + str_size_) { gu_throw_error (EMSGSIZE) << str_size_ << " > " << (buflen-offset); } str_.assign(reinterpret_cast(buf) + offset, str_size_); const size_t tc(str_.find_first_of('\0')); if (tc != std::string::npos) { str_.resize(tc); } return offset + str_size_; } static size_t serial_size() { return str_size_; } const std::string& to_string() const { return str_; } bool operator==(const String& cmp) const { return (str_ == cmp.str_); } private: static const size_t str_size_ = SZ ; std::string str_; /* Human readable name if any */ }; template inline std::ostream& operator<<(std::ostream& 
os, const String& str) { return (os << str.to_string()); } } // namespace gcomm #endif /* _GCOMM_TYPES_HPP_ */ galera-4-26.4.25/gcomm/src/gcomm/protolay.hpp000644 000164 177776 00000027540 15107057155 022101 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ /*! * @file protolay.hpp * * @brief Protocol layer interface definitions. * * Protocol layer interface allows construction of protocol stacks * with consistent interface to send messages upwards or downwards in * stack. */ #ifndef GCOMM_PROTOLAY_HPP #define GCOMM_PROTOLAY_HPP #include "gcomm/view.hpp" #include "gcomm/exception.hpp" #include "gcomm/order.hpp" #include "gcomm/datagram.hpp" #include "gu_logger.hpp" #include "gu_datetime.hpp" #include "gu_config.hpp" #include "gu_status.hpp" #include #include #include #include // Declarations namespace gcomm { /*! * @class ProtoUpMeta * * Container for metadata passed upwards in protocol stack. */ class ProtoUpMeta; std::ostream& operator<<(std::ostream&, const ProtoUpMeta&); /*! * @class ProtoDownMeta * * Container for metadata passed downwards in protocol stack. */ class ProtoDownMeta; /*! * @class Protolay * * Protocol layer interface. */ class Protolay; /*! * @class Toplay * * Protolay that is on the top of the protocol stack. */ class Toplay; /*! * @class Bottomlay * * Protolay that is on the bottom of the protocol stack. */ class Bottomlay; void connect(Protolay*, Protolay*); void disconnect(Protolay*, Protolay*); } /* message context to pass up with the data buffer? 
*/ class gcomm::ProtoUpMeta { public: ProtoUpMeta(const int err_no) : source_(), source_view_id_(), user_type_(), order_(), to_seq_(), err_no_(err_no), view_(0) { } ProtoUpMeta(const UUID source = UUID::nil(), const ViewId source_view_id = ViewId(), const View* view = 0, const uint8_t user_type = 0xff, const Order order = O_DROP, const int64_t to_seq = -1, const int err_no = 0) : source_ (source ), source_view_id_ (source_view_id ), user_type_ (user_type ), order_ (order ), to_seq_ (to_seq ), err_no_ (err_no ), view_ (view != 0 ? new View(*view) : 0) { } ProtoUpMeta(const ProtoUpMeta& um) : source_ (um.source_ ), source_view_id_ (um.source_view_id_ ), user_type_ (um.user_type_ ), order_ (um.order_ ), to_seq_ (um.to_seq_ ), err_no_ (um.err_no_ ), view_ (um.view_ ? new View(*um.view_) : 0) { } ~ProtoUpMeta() { delete view_; } const UUID& source() const { return source_; } const ViewId& source_view_id() const { return source_view_id_; } uint8_t user_type() const { return user_type_; } Order order() const { return order_; } int64_t to_seq() const { return to_seq_; } int err_no() const { return err_no_; } bool has_view() const { return view_ != 0; } const View& view() const { return *view_; } private: ProtoUpMeta& operator=(const ProtoUpMeta&); UUID const source_; ViewId const source_view_id_; uint8_t const user_type_; Order const order_; int64_t const to_seq_; int const err_no_; View* const view_; }; inline std::ostream& gcomm::operator<<(std::ostream& os, const ProtoUpMeta& um) { os << "proto_up_meta: { "; if (not (um.source() == UUID::nil())) { os << "source=" << um.source() << ","; } if (um.source_view_id().type() != V_NONE) { os << "source_view_id=" << um.source_view_id() << ","; } os << "user_type=" << static_cast(um.user_type()) << ","; os << "to_seq=" << um.to_seq() << ","; if (um.has_view() == true) { os << "view=" << um.view(); } os << "}"; return os; } /* message context to pass down? 
*/ class gcomm::ProtoDownMeta { public: ProtoDownMeta(const uint8_t user_type = 0xff, const Order order = O_SAFE, const UUID& source = UUID::nil(), const int segment = 0) : user_type_ (user_type), order_ (order), source_ (source), target_ (UUID::nil()), segment_ (segment) { } ProtoDownMeta(const UUID& target) : user_type_(0xff) , order_ (O_SAFE) , source_(UUID::nil()) , target_(target) , segment_(0) { } uint8_t user_type() const { return user_type_; } Order order() const { return order_; } const UUID& source() const { return source_; } const UUID& target() const { return target_; } int segment() const { return segment_; } private: const uint8_t user_type_; const Order order_; const UUID source_; const UUID target_; const int segment_; }; class gcomm::Protolay { public: typedef Map EvictList; typedef boost::function sync_param_cb_t; virtual ~Protolay() {} virtual void connect(bool) { } virtual void close(bool force = false) { } virtual void close(const UUID& uuid) { } /* apparently handles data from upper layer. what is return value? 
*/ virtual int handle_down (Datagram&, const ProtoDownMeta&) = 0; virtual void handle_up (const void*, const Datagram&, const ProtoUpMeta&) = 0; void set_up_context(Protolay *up) { if (std::find(up_context_.begin(), up_context_.end(), up) != up_context_.end()) { gu_throw_fatal << "up context already exists"; } up_context_.push_back(up); } void set_down_context(Protolay *down) { if (std::find(down_context_.begin(), down_context_.end(), down) != down_context_.end()) { gu_throw_fatal << "down context already exists"; } down_context_.push_back(down); } void unset_up_context(Protolay* up) { CtxList::iterator i; if ((i = std::find(up_context_.begin(), up_context_.end(), up)) == up_context_.end()) { gu_throw_fatal << "up context does not exist"; } up_context_.erase(i); } void unset_down_context(Protolay* down) { CtxList::iterator i; if ((i = std::find(down_context_.begin(), down_context_.end(), down)) == down_context_.end()) { gu_throw_fatal << "down context does not exist"; } down_context_.erase(i); } /* apparently passed data buffer to the upper layer */ void send_up(const Datagram& dg, const ProtoUpMeta& up_meta) { if (up_context_.empty() == true) { gu_throw_fatal << this << " up context(s) not set"; } CtxList::iterator i, i_next; for (i = up_context_.begin(); i != up_context_.end(); i = i_next) { i_next = i, ++i_next; (*i)->handle_up(this, dg, up_meta); } } /* apparently passes data buffer to lower layer, what is return value? 
*/ int send_down(Datagram& dg, const ProtoDownMeta& down_meta) { if (down_context_.empty() == true) { return ENOTCONN; } int ret = 0; for (CtxList::iterator i = down_context_.begin(); i != down_context_.end(); ++i) { const size_t hdr_offset(dg.header_offset()); int err = (*i)->handle_down(dg, down_meta); // Verify that lower layer rolls back any modifications to // header if (hdr_offset != dg.header_offset()) { gu_throw_fatal; } if (err != 0) { ret = err; } } return ret; } virtual void handle_stable_view(const View& view) { } void set_stable_view(const View& view) { for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->handle_stable_view(view); } } virtual void handle_allow_connect(const UUID& uuid) { } /* Allow connections from remote node identified by uuid. */ void allow_connect(const UUID& uuid) { handle_allow_connect(uuid); for (auto& i : down_context_) { i->handle_allow_connect(uuid); } } virtual void handle_evict(const UUID& uuid) { } void evict(const UUID& uuid) { evict_list_.insert( std::make_pair(uuid, gu::datetime::Date::monotonic())); handle_evict(uuid); for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->evict(uuid); } } void unevict(const UUID& uuid) { evict_list_.erase(uuid); for (CtxList::iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->unevict(uuid); } } bool is_evicted(const UUID& uuid) const { if (down_context_.empty()) { return (evict_list_.find(uuid) != evict_list_.end()); } else { return (*down_context_.begin())->is_evicted(uuid); } } const EvictList& evict_list() const { return evict_list_; } virtual void handle_get_status(gu::Status& status) const { } void get_status(gu::Status& status) const { for (CtxList::const_iterator i(down_context_.begin()); i != down_context_.end(); ++i) { (*i)->get_status(status); } handle_get_status(status); } std::string get_address(const UUID& uuid) const { if (down_context_.empty()) return handle_get_address(uuid); 
else return (*down_context_.begin())->get_address(uuid); } virtual std::string handle_get_address(const UUID& uuid) const { return "(unknown)"; } virtual gu::datetime::Date handle_timers() { return gu::datetime::Date::max(); } virtual bool set_param(const std::string& key, const std::string& val, sync_param_cb_t& sync_param_cb) { return false; } const Protolay* id() const { return this; } protected: Protolay(gu::Config& conf) : conf_(conf), up_context_(0), down_context_(0), evict_list_() { } gu::Config& conf_; private: typedef std::list CtxList; CtxList up_context_; CtxList down_context_; EvictList evict_list_; Protolay (const Protolay&); Protolay& operator=(const Protolay&); }; class gcomm::Toplay : protected Conf::Check, public Protolay { public: Toplay(gu::Config& conf) : Conf::Check(conf), Protolay(conf) { } private: int handle_down(Datagram& dg, const ProtoDownMeta& dm) { gu_throw_fatal << "Toplay handle_down() called"; } }; class gcomm::Bottomlay : public Protolay { public: Bottomlay(gu::Config& conf) : Protolay(conf) { } private: void handle_up(const void* id, const Datagram&, const ProtoUpMeta& um) { gu_throw_fatal << "Bottomlay handle_up() called"; } }; inline void gcomm::connect(Protolay* down, Protolay* up) { down->set_up_context(up); up->set_down_context(down); } inline void gcomm::disconnect(Protolay* down, Protolay* up) { down->unset_up_context(up); up->unset_down_context(down); } #endif /* GCOMM_PROTOLAY_HPP */ galera-4-26.4.25/gcomm/src/gcomm/protostack.hpp000644 000164 177776 00000001661 15107057155 022415 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_PROTOSTACK_HPP #define GCOMM_PROTOSTACK_HPP #include "gcomm/protolay.hpp" #include "gu_lock.hpp" #include #include namespace gcomm { class Socket; class Acceptor; class Protostack; class Protonet; class BoostProtonet; } class gcomm::Protostack { public: Protostack() : protos_(), mutex_() { } void push_proto(Protolay* p); void pop_proto(Protolay* p); 
gu::datetime::Date handle_timers(); void dispatch(const void* id, const Datagram& dg, const ProtoUpMeta& um); bool set_param(const std::string&, const std::string&, Protolay::sync_param_cb_t& sync_param_cb); void enter() { mutex_.lock(); } void leave() { mutex_.unlock(); } private: friend class Protonet; std::deque protos_; gu::Mutex mutex_; }; #endif // GCOMM_PROTOSTACK_HPP galera-4-26.4.25/gcomm/src/gcomm/transport.hpp000644 000164 177776 00000005322 15107057155 022256 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ /*! * @file transport.hpp * * @brief Transport interface. */ #ifndef _GCOMM_TRANSPORT_HPP_ #define _GCOMM_TRANSPORT_HPP_ #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/protostack.hpp" #include "gcomm/protonet.hpp" #include "gu_uri.hpp" namespace gcomm { /*! * @class Transport * * @brief Transport interface */ class Transport; } /*! * */ class gcomm::Transport : public Protolay { public: virtual ~Transport(); virtual size_t mtu() const = 0; virtual const UUID& uuid() const = 0; virtual std::string local_addr() const; virtual std::string remote_addr() const; int err_no() const; virtual void connect(bool start_prim) { gu_throw_fatal << "connect(start_prim) not supported"; } virtual void connect() // if not overloaded, will default to connect(bool) { connect(false); } virtual void connect(const gu::URI& uri) { gu_throw_fatal << "connect(URI) not supported"; } virtual void close(bool force = false) = 0; virtual void close(const UUID& uuid) { gu_throw_error(ENOTSUP) << "close(UUID) not supported by " << uri_.get_scheme(); } virtual void listen(); // Get configured listen addr. For test purposes only. 
virtual std::string configured_listen_addr() const { gu_throw_fatal << "not supported"; } virtual std::string listen_addr() const { gu_throw_fatal << "not supported"; } virtual Transport* accept(); virtual void handle_accept(Transport*) { gu_throw_error(ENOTSUP) << "handle_accept() not supported by" << uri_.get_scheme(); } virtual void handle_connect() { gu_throw_error(ENOTSUP) << "handle_connect() not supported by" << uri_.get_scheme(); } virtual int handle_down(Datagram&, const ProtoDownMeta&) = 0; virtual void handle_up (const void*, const Datagram&, const ProtoUpMeta&) = 0; virtual void handle_stable_view(const View& view) { } Protostack& pstack() { return pstack_; } Protonet& pnet() { return pnet_; } static Transport* create(Protonet&, const std::string&); static Transport* create(Protonet&, const gu::URI&); protected: Transport (Protonet&, const gu::URI&); Protostack pstack_; Protonet& pnet_; gu::URI uri_; int error_no_; private: Transport (const Transport&); Transport& operator=(const Transport&); }; #endif // _GCOMM_TRANSPORT_HPP_ galera-4-26.4.25/gcomm/src/gcomm/order.hpp000644 000164 177776 00000002724 15107057155 021340 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! * @file order.hpp * * @brief Message order type enumeration. */ #ifndef GCOMM_ORDER_HPP #define GCOMM_ORDER_HPP namespace gcomm { /*! * @brief Message order type enumeration. */ enum Order { /*! Message will not be delivered, for protocol use only. */ O_DROP = 0, /*! Message delivery is unreliable, for protocol use only. */ O_UNRELIABLE = 1, /*! Message will be delivered in source fifo order. */ O_FIFO = 2, /*! * Message will be delivered in same order on all nodes * if it is delivered. */ O_AGREED = 3, /*! * Message will be delivered in safe order, it is guaranteed * that all the nodes in group have received the message. */ O_SAFE = 4, /*! 
* Message will be delivered only locally and delivery will fulfill the * following property: * * Let M_c be message tagged with O_LOCAL_CAUSAL ordering requirement. * Any message M_a which is delivered on any node so that delivery * has causal precedence on generating M_c will be delivered locally * before M_c. * * Note that the causality is guaranteed only with respect to * already delivered messages. */ O_LOCAL_CAUSAL = 8 }; } #endif // GCOMM_ORDER_HPP galera-4-26.4.25/gcomm/src/gcomm/view.hpp000644 000164 177776 00000016214 15107057155 021176 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ /*! * @file Group view class (used in the ProtoUpMeta (protolay.hpp) */ #ifndef _GCOMM_VIEW_HPP_ #define _GCOMM_VIEW_HPP_ #include "gcomm/uuid.hpp" #include "gcomm/types.hpp" #include "gcomm/map.hpp" #include "gcomm/conf.hpp" namespace gcomm { typedef enum { V_NONE = -1, V_REG = 0, V_TRANS = 1, V_NON_PRIM = 2, V_PRIM = 3 } ViewType; class ViewId { public: ViewId(const ViewType type = V_NONE, const UUID& uuid = UUID::nil(), const uint32_t seq = 0) : type_(type), uuid_(uuid), seq_ (seq) { } ViewId(const ViewType type, const ViewId& vi) : type_(type), uuid_(vi.uuid()), seq_ (vi.seq()) { } virtual ~ViewId() { } ViewType type() const { return type_; } const UUID& uuid() const { return uuid_; } uint32_t seq() const { return seq_; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset); size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const; static size_t serial_size() { return UUID::serial_size() + sizeof(reinterpret_cast(0)->seq_); } bool operator<(const ViewId& cmp) const { // View ordering: // 1) view seq less than // 2) uuid newer than // 3) type less than return (seq_ < cmp.seq_ || (seq_ == cmp.seq_ && (cmp.uuid_.older(uuid_) || (uuid_ == cmp.uuid_ && type_ < cmp.type_) ) ) ); } bool operator==(const ViewId& cmp) const { return (seq_ == cmp.seq_ && type_ == cmp.type_ && uuid_ == cmp.uuid_); } bool operator!=(const 
ViewId& cmp) const { return !(*this == cmp); } std::ostream& write_stream(std::ostream& os) const { os << static_cast(type_) << " "; uuid_.print(os); os << " " << seq_; return os; } std::istream& read_stream(std::istream& is) { int t; is >> t; type_ = static_cast(t); uuid_.scan(is); is >> seq_; return is; } private: ViewType type_; UUID uuid_; // uniquely identifies the sequence of group views (?) uint32_t seq_; // position in the sequence (?) }; std::ostream& operator<<(std::ostream&, const ViewId&); typedef uint8_t SegmentId; class Node { public: Node(SegmentId segment = 0) : segment_(segment) { } SegmentId segment() const { return segment_; } bool operator==(const Node& cmp) const { return segment_ == cmp.segment_; } bool operator<(const Node& cmp) const { return segment_ < cmp.segment_; } std::ostream& write_stream(std::ostream& os) const { os << static_cast(segment_); return os; } std::istream& read_stream(std::istream& is) { int seg; is >> seg; segment_ = static_cast(seg); return is; } private: SegmentId segment_; }; inline std::ostream& operator<<(std::ostream& os, const Node& n) { return (os << static_cast(n.segment()) ); } class NodeList : public gcomm::Map { }; class View { public: View() : version_ (-1), bootstrap_ (false), view_id_ (V_NONE), members_ (), joined_ (), left_ (), partitioned_ () { } View(int version, const ViewId& view_id, bool bootstrap = false) : version_ (version), bootstrap_ (bootstrap), view_id_ (view_id), members_ (), joined_ (), left_ (), partitioned_ () { } ~View() {} int version() const { return version_; } void add_member (const UUID& pid, SegmentId segment); void add_members (NodeList::const_iterator begin, NodeList::const_iterator end); void add_joined (const UUID& pid, SegmentId segment); void add_left (const UUID& pid, SegmentId segment); void add_partitioned (const UUID& pid, SegmentId segment); const NodeList& members () const; const NodeList& joined () const; const NodeList& left () const; const NodeList& partitioned () 
const; NodeList& members() { return members_; } bool is_member(const UUID& uuid) const { return members_.find(uuid) != members_.end(); } bool is_joining(const UUID& uuid) const { return joined_.find(uuid) != joined_.end(); } bool is_leaving(const UUID& uuid) const { return left_.find(uuid) != left_.end(); } bool is_partitioning(const UUID& uuid) const { return partitioned_.find(uuid) != partitioned_.end(); } ViewType type () const; const ViewId& id () const; const UUID& representative () const; bool is_empty() const; bool is_bootstrap() const { return bootstrap_; } std::ostream& write_stream(std::ostream& os) const; std::istream& read_stream(std::istream& is); private: int version_; // view protocol version, derived from evs group bool bootstrap_; // Flag indicating if view was bootstrapped ViewId view_id_; // View identifier NodeList members_; // List of members in view NodeList joined_; // List of newly joined members in view NodeList left_; // Fracefully left members from previous view NodeList partitioned_; // Partitioned members from previous view }; bool operator==(const gcomm::View&, const gcomm::View&); std::ostream& operator<<(std::ostream&, const View&); class ViewState { public: ViewState(UUID& my_uuid, View& view, gu::Config& conf): my_uuid_(my_uuid), view_(view), file_name_(get_viewstate_file_name(conf)) { } std::ostream& write_stream(std::ostream& os) const; std::istream& read_stream(std::istream& is); void write_file() const; bool read_file(); static void remove_file(gu::Config& conf); bool operator== (const ViewState& vst) const { return my_uuid_ == vst.my_uuid_ && view_ == vst.view_; } private: UUID& my_uuid_; View& view_; std::string file_name_; static std::string get_viewstate_file_name(gu::Config& conf); }; } // namespace gcomm #endif // _GCOMM_VIEW_HPP_ galera-4-26.4.25/gcomm/src/gcomm/exception.hpp000644 000164 177776 00000001066 15107057155 022221 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ /*! 
* @file exception.hpp * * @brief GComm exception definitions. */ #ifndef GCOMM_EXCEPTION_HPP #define GCOMM_EXCEPTION_HPP #include "gu_throw.hpp" /*! * Assert macro for runtime condition checking. This should be used * for conditions that may depend on external input and are required * to validate correct protocol operation. */ #define gcomm_assert(cond_) \ if ((cond_) == false) gu_throw_fatal << #cond_ << ": " #endif // GCOMM_EXCEPTION_HPP galera-4-26.4.25/gcomm/src/pc.hpp000644 000164 177776 00000002547 15107057155 017530 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy */ #include "gcomm/transport.hpp" namespace gcomm { class GMCast; namespace evs { class Proto; } namespace pc { class Proto; } class PC : public Transport { public: PC (Protonet&, const gu::URI&); ~PC(); void connect(bool start_prim = false); void connect(const gu::URI&); std::string listen_addr() const; void close(bool force = false); void handle_up(const void*, const Datagram&, const ProtoUpMeta&); int handle_down(Datagram&, const ProtoDownMeta&); const UUID& uuid() const; size_t mtu() const; void handle_get_status(gu::Status& status) const; private: GMCast* gmcast_; // GMCast transport evs::Proto* evs_; // EVS protocol layer pc::Proto* pc_; // PC protocol layer bool closed_; // flag for destructor // Period to wait graceful leave gu::datetime::Period linger_; gu::datetime::Period announce_timeout_; bool pc_recovery_; UUID rst_uuid_; View rst_view_; PC(const PC&); void operator=(const PC&); }; } // namespace gcomm galera-4-26.4.25/gcomm/src/gmcast_node.hpp000644 000164 177776 00000003233 15107057155 021402 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ #ifndef GMCAST_NODE_HPP #define GMCAST_NODE_HPP #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gu_serialize.hpp" namespace gcomm { namespace gmcast { class Node; std::ostream& operator<<(std::ostream&, const Node&); } } class gcomm::gmcast::Node { public: Node(const 
std::string& addr = "") : addr_(addr), mcast_addr_("") { } const std::string& addr() const { return addr_.to_string(); } const std::string& mcast_addr() const { return mcast_addr_.to_string(); } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; uint32_t bits; gu_trace (off = gu::unserialize4(buf, buflen, offset, bits)); gu_trace (off = addr_.unserialize(buf, buflen, off)); gu_trace (off = mcast_addr_.unserialize(buf, buflen, off)); return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; uint32_t bits(0); gu_trace (off = gu::serialize4(bits, buf, buflen, offset)); gu_trace (off = addr_.serialize(buf, buflen, off)); gu_trace (off = mcast_addr_.serialize(buf, buflen, off)); return off; } static size_t serial_size() { return (4 + 2 * ADDR_SIZE); } private: static const size_t ADDR_SIZE = 64; gcomm::String addr_; gcomm::String mcast_addr_; }; inline std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const Node& n) { return os; } #endif // GMCAST_NODE_HPP galera-4-26.4.25/gcomm/src/transport.cpp000644 000164 177776 00000003015 15107057155 021144 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ #include "gcomm/transport.hpp" #include "socket.hpp" #include "gmcast.hpp" #include "pc.hpp" #include "gcomm/conf.hpp" // Public methods const gcomm::UUID& gcomm::Transport::uuid() const { gu_throw_fatal << "UUID not supported by " + uri_.get_scheme(); } std::string gcomm::Transport::local_addr() const { gu_throw_fatal << "get local url not supported"; } std::string gcomm::Transport::remote_addr() const { gu_throw_fatal << "get remote url not supported"; } int gcomm::Transport::err_no() const { return error_no_; } void gcomm::Transport::listen() { gu_throw_fatal << "not supported"; } gcomm::Transport* gcomm::Transport::accept() { gu_throw_fatal << "not supported"; } // CTOR/DTOR gcomm::Transport::Transport(Protonet& pnet, const gu::URI& uri) : 
Protolay(pnet.conf()), pstack_(), pnet_(pnet), uri_(uri), error_no_(0) { } gcomm::Transport::~Transport() { } gcomm::Transport* gcomm::Transport::create(Protonet& pnet, const gu::URI& uri) { const std::string& scheme = uri.get_scheme(); if (scheme == Conf::GMCastScheme) { return new GMCast(pnet, uri); } else if (scheme == Conf::PcScheme) { return new PC(pnet, uri); } gu_throw_fatal << "scheme '" << uri.get_scheme() << "' not supported"; } gcomm::Transport* gcomm::Transport::create(Protonet& pnet, const std::string& uri_str) { return create(pnet, gu::URI(uri_str)); } galera-4-26.4.25/gcomm/src/uuid.cpp000644 000164 177776 00000000244 15107057155 020057 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #include "gcomm/uuid.hpp" const gcomm::UUID gcomm::UUID::uuid_nil_ = gcomm::UUID(GU_UUID_NIL); galera-4-26.4.25/gcomm/src/pc.cpp000644 000164 177776 00000022510 15107057155 017513 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "pc.hpp" #include "pc_proto.hpp" #include "evs_proto.hpp" #include "evs_message2.hpp" #include "gmcast.hpp" #include "defaults.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "gu_datetime.hpp" void gcomm::PC::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (pc_recovery_ && um.err_no() == 0 && um.has_view() && um.view().id().type() == V_PRIM) { ViewState vst(const_cast(uuid()), const_cast(um.view()), conf_); log_info << "save pc into disk"; vst.write_file(); } send_up(rb, um); } int gcomm::PC::handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (wb.len() == 0) { gu_throw_error(EMSGSIZE); } return send_down(wb, dm); } size_t gcomm::PC::mtu() const { // TODO: if (gmcast_ == 0) gu_throw_fatal << "not open"; evs::UserMessage evsm; pc::UserMessage pcm(0, 0); if (gmcast_->mtu() < 2*evsm.serial_size() + pcm.serial_size()) { gu_throw_fatal << "transport max msg size too small: " << gmcast_->mtu(); } return gmcast_->mtu() - 
2*evsm.serial_size() - pcm.serial_size(); } const gcomm::UUID& gcomm::PC::uuid() const { return gmcast_->uuid(); } std::string gcomm::PC::listen_addr() const { return gmcast_->listen_addr(); } void gcomm::PC::connect(bool start_prim) { try { // for backward compatibility with old approach: gcomm://0.0.0.0 start_prim = (start_prim || host_is_any (uri_.get_host())); } catch (gu::NotSet& ns) { start_prim = true; } bool wait_prim(param(conf_, uri_, Conf::PcWaitPrim, Defaults::PcWaitPrim)); const gu::datetime::Period wait_prim_timeout( param(conf_, uri_, Conf::PcWaitPrimTimeout, Defaults::PcWaitPrimTimeout)); // --wsrep-new-cluster specified in command line // or cluster address as gcomm://0.0.0.0 or gcomm:// // should take precedence. otherwise it's not able to bootstrap. if (start_prim) { log_info << "start_prim is enabled, turn off pc_recovery"; } else if (rst_view_.type() == V_PRIM) { wait_prim = false; } pstack_.push_proto(gmcast_); pstack_.push_proto(evs_); pstack_.push_proto(pc_); pstack_.push_proto(this); pnet().insert(&pstack_); gmcast_->connect_precheck(start_prim); gmcast_->connect(); closed_ = false; evs_->shift_to(evs::Proto::S_JOINING); pc_->connect(start_prim); // Due to #658 there is limited announce period after which // node is allowed to proceed to non-prim if other nodes // are not detected. 
gu::datetime::Date try_until( gu::datetime::Date::monotonic() + announce_timeout_); while (start_prim == false && evs_->known_size() <= 1) { // Send join messages without handling them evs_->send_join(false); pnet().event_loop(gu::datetime::Sec/2); if (try_until < gu::datetime::Date::monotonic()) { break; } } log_debug << "PC/EVS Proto initial state: " << *evs_; if (evs_->state() != evs::Proto::S_OPERATIONAL) { log_debug << "PC/EVS Proto sending join request"; evs_->send_join(); } gcomm_assert(evs_->state() == evs::Proto::S_GATHER || evs_->state() == evs::Proto::S_INSTALL || evs_->state() == evs::Proto::S_OPERATIONAL); // - Due to #658 we loop here only if node is told to start in prim. // - Fix for #680, bypass waiting prim only if explicitly required try_until = gu::datetime::Date::monotonic() + wait_prim_timeout; while ((wait_prim == true || start_prim == true) && pc_->state() != pc::Proto::S_PRIM) { pnet().event_loop(gu::datetime::Sec/2); if (try_until < gu::datetime::Date::monotonic()) { pc_->close(); evs_->close(); gmcast_->close(); pnet().erase(&pstack_); pstack_.pop_proto(this); pstack_.pop_proto(pc_); pstack_.pop_proto(evs_); pstack_.pop_proto(gmcast_); gu_throw_error(ETIMEDOUT) << "failed to reach primary view"; } } pc_->set_mtu(mtu()); } void gcomm::PC::connect(const gu::URI& uri) { uri_ = uri; connect(); } void gcomm::PC::close(bool force) { if (force == true) { log_info << "Forced PC close"; gmcast_->close(); // Don't bother closing PC and EVS at this point. Currently // there is no way of knowing why forced close was issued, // so graceful close of PC and/or EVS may not be safe. // pc_->close(); // evs_->close(); } else { log_debug << "PC/EVS Proto leaving"; auto start_time = gu::datetime::Date::monotonic(); pc_->close(); evs_->close(); auto wait_until = start_time + linger_; do { /* Loop in 10ms intervals and check if EVS is closed. * Keep the interval short, event loop does not terminate * even if it runs out of work because of the timer. 
*/ pnet().event_loop(gu::datetime::Sec / 100); } while (evs_->state() != evs::Proto::S_CLOSED && gu::datetime::Date::monotonic() < wait_until); auto evs_termination_time = gu::datetime::Date::monotonic(); log_debug << "EVS termination took " << evs_termination_time - start_time << " left in state " << evs::Proto::to_string(evs_->state()); if (evs_->state() != evs::Proto::S_CLOSED) { evs_->shift_to(evs::Proto::S_CLOSED); } if (pc_->state() != pc::Proto::S_CLOSED) { log_warn << "PCProto didn't reach closed state"; } gmcast_->close(); wait_until = gu::datetime::Date::monotonic() + gu::datetime::Sec; while (gu::datetime::Date::monotonic() < wait_until) { /* Loop in 10ms intervals to process unsent messages. * If no message were processed during that time, it likely * means that all the connections have been closed or that the * receiving side is not able to process messages fast enough. */ size_t count = pnet().event_loop(gu::datetime::Sec / 100); if (count == 0) { break; } } auto stop_time = gu::datetime::Date::monotonic(); log_debug << "PC close took " << stop_time - start_time; } pnet().erase(&pstack_); pstack_.pop_proto(this); pstack_.pop_proto(pc_); pstack_.pop_proto(evs_); pstack_.pop_proto(gmcast_); ViewState::remove_file(conf_); closed_ = true; } void gcomm::PC::handle_get_status(gu::Status& status) const { status.insert("gcomm_uuid", uuid().full_str()); status.insert("cluster_weight", gu::to_string( pc_ ? 
pc_->cluster_weight() : 0)); status.insert("gmcast_segment", gu::to_string(int(gmcast_->segment()))); } gcomm::PC::PC(Protonet& net, const gu::URI& uri) : Transport (net, uri), gmcast_ (0), evs_ (0), pc_ (0), closed_ (true), linger_ (param( conf_, uri, Conf::PcLinger, Defaults::PcLinger)), announce_timeout_(param( conf_, uri, Conf::PcAnnounceTimeout, Defaults::PcAnnounceTimeout)), pc_recovery_ (param(conf_, uri, Conf::PcRecovery, Defaults::PcRecovery)), rst_uuid_(), rst_view_() { if (uri_.get_scheme() != Conf::PcScheme) { log_fatal << "invalid uri: " << uri_.to_string(); } conf_.set(Conf::PcRecovery, gu::to_string(pc_recovery_)); bool restored = false; ViewState vst(rst_uuid_, rst_view_, conf_); if (pc_recovery_) { if (vst.read_file()) { log_info << "restore pc from disk successfully"; rst_uuid_.increment_incarnation(); vst.write_file(); restored = true; } else { log_info << "restore pc from disk failed"; } } else { log_info << "skip pc recovery and remove state file"; ViewState::remove_file(conf_); } gmcast_ = new GMCast(pnet(), uri_, restored ? &rst_uuid_ : NULL); const UUID& uuid(gmcast_->uuid()); if (uuid == UUID::nil()) { gu_throw_fatal << "invalid UUID: " << uuid; } evs::UserMessage evsum; evs_ = new evs::Proto(pnet().conf(), uuid, gmcast_->segment(), uri_, gmcast_->mtu() - 2*evsum.serial_size(), restored ? &rst_view_ : NULL); pc_ = new pc::Proto (pnet().conf(), uuid, gmcast_->segment(), uri_, restored ? &rst_view_ : NULL); conf_.set(Conf::PcLinger, gu::to_string(linger_)); } gcomm::PC::~PC() { if (!closed_) { try { close(); } catch (...) 
{ } sleep(1); // half-hearted attempt to avoid race with client threads } delete gmcast_; delete evs_; delete pc_; } galera-4-26.4.25/gcomm/src/pc_proto.cpp000644 000164 177776 00000157414 15107057155 020752 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #include "pc_proto.hpp" #include "pc_message.hpp" #include "gcomm/util.hpp" #include "gu_lock.hpp" #include "gu_logger.hpp" #include "gu_macros.h" #include #include #include // std::cerr #include using std::rel_ops::operator!=; using std::rel_ops::operator>; // // Helpers // class SelectPrimOp { public: SelectPrimOp(gcomm::pc::Proto::SMMap& states) : states_(states) { } void operator()(const gcomm::pc::Proto::SMMap::value_type& vt) const { const gcomm::UUID& uuid(gcomm::pc::Proto::SMMap::key(vt)); const gcomm::pc::Message& msg(gcomm::pc::Proto::SMMap::value(vt)); const gcomm::pc::NodeMap& nm(msg.node_map()); gcomm::pc::NodeMap::const_iterator nm_i(nm.find(uuid)); if (nm_i == nm.end()) { gu_throw_error(EPROTO) << "protocol error, self not found from " << uuid << " state msg node list"; } if (gcomm::pc::NodeMap::value(nm_i).prim() == true) { states_.insert(vt); } } private: gcomm::pc::Proto::SMMap& states_; }; class ToSeqCmpOp { public: bool operator()(const gcomm::pc::Proto::SMMap::value_type& a, const gcomm::pc::Proto::SMMap::value_type& b) const { const gcomm::pc::Node& astate( gcomm::pc::NodeMap::value( gcomm::pc::Proto::SMMap::value(a).node_map() .find_checked(gcomm::pc::Proto::SMMap::key(a)))); const gcomm::pc::Node& bstate( gcomm::pc::NodeMap::value( gcomm::pc::Proto::SMMap::value(b).node_map() .find_checked(gcomm::pc::Proto::SMMap::key(b)))); return (astate.to_seq() < bstate.to_seq()); } }; class UUIDFixedPartCmp { public: UUIDFixedPartCmp(const gcomm::UUID& uuid) : uuid_(uuid) { } bool operator()(const gcomm::NodeList::value_type& vt) const { return uuid_.fixed_part_matches(vt.first); } private: const gcomm::UUID& uuid_; }; static bool UUID_fixed_part_cmp_equal(const 
gcomm::NodeList::value_type& lhs, const gcomm::NodeList::value_type& rhs) { return lhs.first.fixed_part_matches(rhs.first); } static bool UUID_fixed_part_cmp_intersection(const gcomm::UUID& lhs, const gcomm::UUID& rhs) { return lhs.fixed_part_matches(rhs) ? false : lhs < rhs; } // Return max to seq found from states, -1 if states is empty static int64_t get_max_to_seq(const gcomm::pc::Proto::SMMap& states) { if (states.empty() == true) return -1; gcomm::pc::Proto::SMMap::const_iterator max_i( max_element(states.begin(), states.end(), ToSeqCmpOp())); const gcomm::pc::Node& state( gcomm::pc::Proto::SMMap::value(max_i).node( gcomm::pc::Proto::SMMap::key(max_i))); return state.to_seq(); } static void checksum(gcomm::pc::Message& msg, gcomm::Datagram& dg) { uint16_t crc16(gcomm::crc16(dg, 4)); msg.checksum(crc16, true); gcomm::pop_header(msg, dg); gcomm::push_header(msg, dg); } static void test_checksum(gcomm::pc::Message& msg, const gcomm::Datagram& dg, size_t offset) { uint16_t msg_crc16(msg.checksum()); uint16_t crc16(gcomm::crc16(dg, offset + 4)); if (crc16 != msg_crc16) { gu_throw_fatal << "Message checksum failed"; } } std::ostream& gcomm::pc::operator<<(std::ostream& os, const gcomm::pc::Proto& p) { os << "pc::Proto{"; os << "uuid=" << p.my_uuid_ << ","; os << "start_prim=" << p.start_prim_ << ","; os << "npvo=" << p.npvo_ << ","; os << "ignore_sb=" << p.ignore_sb_ << ","; os << "ignore_quorum=" << p.ignore_quorum_ << ","; os << "state=" << p.state_ << ","; os << "last_sent_seq=" << p.last_sent_seq_ << ","; os << "checksum=" << p.checksum_ << ","; os << "instances=\n" << p.instances_ << ","; os << "state_msgs=\n" << p.state_msgs_ << ","; os << "current_view=" << p.current_view_ << ","; os << "pc_view=" << p.pc_view_ << ","; // os << "views=" << p.views_ << ","; os << "mtu=" << p.mtu_ << "}"; return os; } // // // void gcomm::pc::Proto::send_state() { log_debug << self_id() << " sending state"; StateMessage pcs(current_view_.version()); NodeMap& 
im(pcs.node_map()); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { // Assume all nodes in the current view have reached current to_seq Node& local_state(NodeMap::value(i)); if (current_view_.is_member(NodeMap::key(i)) == true) { local_state.set_to_seq(to_seq()); } if (is_evicted(NodeMap::key(i)) == true) { local_state.set_evicted(true); } im.insert_unique(std::make_pair(NodeMap::key(i), local_state)); } log_debug << self_id() << " local to seq " << to_seq(); log_debug << self_id() << " sending state: " << pcs; gu::Buffer buf; serialize(pcs, buf); Datagram dg(buf); if (send_down(dg, ProtoDownMeta())) { gu_throw_fatal << "pass down failed"; } } static std::string send_error_str(int const err) { std::ostringstream os; switch (err) { case 0: os << "Success"; break; case EAGAIN: os << "Cluster configuration change in progress or flow control active"; break; case ENOTCONN: os << "Not connected to the cluster"; break; default: os << "Unknown error: " << err; break; } return os.str(); } int gcomm::pc::Proto::send_install(bool bootstrap, int weight) { gcomm_assert(bootstrap == false || weight == -1); log_debug << self_id() << " send install"; InstallMessage pci(current_view_.version()); NodeMap& im(pci.node_map()); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { if (current_view_.members().find(SMMap::key(i)) != current_view_.members().end()) { gu_trace( im.insert_unique( std::make_pair( SMMap::key(i), SMMap::value(i).node((SMMap::key(i)))))); } } if (bootstrap == true) { pci.flags(pci.flags() | InstallMessage::F_BOOTSTRAP); log_debug << self_id() << " sending PC bootstrap message " << pci; } else if (weight != -1) { pci.flags(pci.flags() | InstallMessage::F_WEIGHT_CHANGE); Node& self(pci.node(uuid())); self.set_weight(weight); log_info << self_id() << " sending PC weight change message " << pci; } else { log_debug << self_id() << " sending install: " << pci; } gu::Buffer buf; serialize(pci, buf); Datagram 
dg(buf); int ret = send_down(dg, ProtoDownMeta()); if (ret) { log_info << "sending install message for new primary component failed: " << send_error_str(ret) << ", will retry in next configuration"; } return ret; } void gcomm::pc::Proto::deliver_view(bool bootstrap) { View v(pc_view_.version(), pc_view_.id(), bootstrap); for (NodeMap::const_iterator i = instances_.begin(); i != instances_.end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { v.add_partitioned(NodeMap::key(i), NodeMap::value(i).segment()); } else { v.add_member(NodeMap::key(i), NodeMap::value(i).segment()); } } ProtoUpMeta um(UUID::nil(), ViewId(), &v); log_info << v; send_up(Datagram(), um); set_stable_view(v); if (v.id().type() == V_NON_PRIM && rst_view_ && !start_prim_) { // pc recovery process. uint32_t max_view_seqno = 0; bool check = true; for(NodeMap::const_iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); // just consider property of nodes in restored view. 
if (std::find_if(rst_view_->members().begin(), rst_view_->members().end(), UUIDFixedPartCmp(uuid)) != rst_view_->members().end()) { const Node& node(NodeMap::value(i)); const ViewId& last_prim(node.last_prim()); if (last_prim.type() != V_NON_PRIM || last_prim.uuid() != rst_view_ -> id().uuid()) { log_warn << "node uuid: " << uuid << " last_prim(type: " << last_prim.type() << ", uuid: " << last_prim.uuid() << ") is inconsistent to " << "restored view(type: V_NON_PRIM, uuid: " << rst_view_ ->id().uuid(); check = false; break; } max_view_seqno = std::max(max_view_seqno, last_prim.seq()); } } if (check) { assert(max_view_seqno != 0); log_debug << "max_view_seqno = " << max_view_seqno << ", rst_view_seqno = " << rst_view_ -> id().seq(); log_debug << "rst_view = "; log_debug << *rst_view_; log_debug << "deliver_view = "; log_debug << v; if (rst_view_->id().seq() == max_view_seqno && v.members().size() == rst_view_->members().size() && std::equal(v.members().begin(), v.members().end(), rst_view_->members().begin(), UUID_fixed_part_cmp_equal)) { log_info << "promote to primary component"; // All of the nodes are in non-primary so we need to bootstrap. send_install(true); // Rst_view will be cleared after primary component is formed. // If the rst_view would be cleared here and there would be // network partitioning before install message was delivered, // bootstrapping the primary component would never happen again. } } } // if pc is formed by normal process(start_prim_=true) instead of // pc recovery process, rst_view_ won't be clear. // however this will prevent pc remerge(see is_prim function) // so we have to clear rst_view_ once pc is formed.. 
if (v.id().type() == V_PRIM && rst_view_) { log_info << "clear restored view"; rst_view_ = NULL; } } void gcomm::pc::Proto::mark_non_prim() { pc_view_ = View(current_view_.version(), ViewId(V_NON_PRIM, current_view_.id())); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); Node& inst(NodeMap::value(i)); if (current_view_.members().find(uuid) != current_view_.members().end()) { inst.set_prim(false); pc_view_.add_member(uuid, inst.segment()); } } set_prim(false); } void gcomm::pc::Proto::shift_to(const State s) { // State graph static const bool allowed[S_MAX][S_MAX] = { // Cl S-E IN P Trans N-P { false, false, false, false, false, true }, // Closed { true, false, true, false, true, true }, // States exch { true, false, false, true, true, true }, // Install { true, false, false, false, true, true }, // Prim { true, true, false, false, false, true }, // Trans { true, false, false, true, true, true } // Non-prim }; if (allowed[state()][s] == false) { gu_throw_fatal << "Forbidden state transition: " << to_string(state()) << " -> " << to_string(s); } switch (s) { case S_CLOSED: break; case S_STATES_EXCH: state_msgs_.clear(); break; case S_INSTALL: break; case S_PRIM: { pc_view_ = View(current_view_.version(), ViewId(V_PRIM, current_view_.id())); for (NodeMap::iterator i = instances_.begin(); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); Node& inst(NodeMap::value(i)); NodeList::const_iterator nli; if ((nli = current_view_.members().find(uuid)) != current_view_.members().end()) { inst.set_prim(true); inst.set_last_prim(ViewId(V_PRIM, current_view_.id())); inst.set_last_seq(0); inst.set_to_seq(to_seq()); pc_view_.add_member(uuid, inst.segment()); } else { inst.set_prim(false); } } last_sent_seq_ = 0; set_prim(true); break; } case S_TRANS: break; case S_NON_PRIM: mark_non_prim(); break; default: ; } log_debug << self_id() << " shift_to: " << to_string(state()) << " -> " << to_string(s) << " prim 
" << prim() << " last prim " << last_prim() << " to_seq " << to_seq(); state_ = s; } void gcomm::pc::Proto::handle_first_trans(const View& view) { gcomm_assert(state() == S_NON_PRIM); gcomm_assert(view.type() == V_TRANS); if (start_prim_ == true) { if (view.members().size() > 1 || view.is_empty()) { gu_throw_fatal << "Corrupted view"; } if (NodeList::key(view.members().begin()) != uuid()) { gu_throw_fatal << "Bad first UUID: " << NodeList::key(view.members().begin()) << ", expected: " << uuid(); } set_last_prim(ViewId(V_PRIM, view.id())); set_prim(true); } current_view_ = view; shift_to(S_TRANS); } // Compute weighted sum of members in node list. If member cannot be found // from node_map its weight is assumed to be zero. static size_t weighted_sum(const gcomm::NodeList& node_list, const gcomm::pc::NodeMap& node_map) { size_t sum(0); for (gcomm::NodeList::const_iterator i(node_list.begin()); i != node_list.end(); ++i) { int weight(0); gcomm::pc::NodeMap::const_iterator node_i( node_map.find(gcomm::NodeList::key(i))); if (node_i != node_map.end()) { const gcomm::pc::Node& node(gcomm::pc::NodeMap::value(node_i)); gcomm_assert(node.weight() >= 0 && node.weight() <= 0xff); weight = node.weight(); } else { weight = 0; } sum += weight; } return sum; } // Check if all members in node_list have weight associated. This is needed // to fall back to backwards compatibility mode during upgrade (all weights are // assumed to be one). See have_quorum() and have_split_brain() below. 
static bool have_weights(const gcomm::NodeList& node_list, const gcomm::pc::NodeMap& node_map) { for (gcomm::NodeList::const_iterator i(node_list.begin()); i != node_list.end(); ++i) { gcomm::pc::NodeMap::const_iterator node_i( node_map.find(gcomm::NodeList::key(i))); if (node_i != node_map.end()) { const gcomm::pc::Node& node(gcomm::pc::NodeMap::value(node_i)); if (node.weight() == -1) { return false; } } } return true; } static bool node_list_intersection_comp(const gcomm::NodeList::value_type& vt1, const gcomm::NodeList::value_type& vt2) { return (vt1.first < vt2.first); } static gcomm::NodeList node_list_intersection(const gcomm::NodeList& nl1, const gcomm::NodeList& nl2) { gcomm::NodeList ret; std::set_intersection(nl1.begin(), nl1.end(), nl2.begin(), nl2.end(), std::inserter(ret, ret.begin()), node_list_intersection_comp); return ret; } bool gcomm::pc::Proto::have_quorum(const View& view, const View& pc_view) const { // Compare only against members and left which were part of the pc_view. gcomm::NodeList memb_intersection( node_list_intersection(view.members(), pc_view.members())); gcomm::NodeList left_intersection( node_list_intersection(view.left(), pc_view.members())); if (have_weights(view.members(), instances_) && have_weights(view.left(), instances_) && have_weights(pc_view.members(), instances_)) { return (weighted_sum(memb_intersection, instances_) * 2 + weighted_sum(left_intersection, instances_) > weighted_sum(pc_view.members(), instances_)); } else { return (memb_intersection.size()*2 + left_intersection.size() > pc_view.members().size()); } } bool gcomm::pc::Proto::have_split_brain(const View& view) const { // Compare only against members and left which were part of the pc_view. 
gcomm::NodeList memb_intersection( node_list_intersection(view.members(), pc_view_.members())); gcomm::NodeList left_intersection( node_list_intersection(view.left(), pc_view_.members())); if (have_weights(view.members(), instances_) && have_weights(view.left(), instances_) && have_weights(pc_view_.members(), instances_)) { return (weighted_sum(memb_intersection, instances_) * 2 + weighted_sum(left_intersection, instances_) == weighted_sum(pc_view_.members(), instances_)); } else { return (memb_intersection.size()*2 + left_intersection.size() == pc_view_.members().size()); } } void gcomm::pc::Proto::handle_trans(const View& view) { gcomm_assert(view.id().type() == V_TRANS); gcomm_assert(view.id().uuid() == current_view_.id().uuid() && view.id().seq() == current_view_.id().seq()); gcomm_assert(view.version() == current_view_.version()); log_debug << self_id() << " \n\n current view " << current_view_ << "\n\n next view " << view << "\n\n pc view " << pc_view_; log_debug << *this; if (have_quorum(view, pc_view_) == false) { if (closing_ == false && ignore_sb_ == true && have_split_brain(view)) { // configured to ignore split brain log_info << "Ignoring possible split-brain " << "(allowed by configuration) from view:\n" << current_view_ << "\nto view:\n" << view; } else if (closing_ == false && ignore_quorum_ == true) { // configured to ignore lack of quorum log_info << "Ignoring lack of quorum " << "(allowed by configuration) from view:\n" << current_view_ << "\nto view:\n" << view; } else { current_view_ = view; // shift_to(S_NON_PRIM); mark_non_prim(); deliver_view(); shift_to(S_TRANS); return; } } else { log_debug << self_id() << " quorum ok"; } current_view_ = view; shift_to(S_TRANS); } void gcomm::pc::Proto::handle_reg(const View& view) { gcomm_assert(view.type() == V_REG); gcomm_assert(state() == S_TRANS); if (view.is_empty() == false && view.id().seq() <= current_view_.id().seq()) { gu_throw_fatal << "Non-increasing view ids: current view " << 
current_view_.id() << " new view " << view.id(); } if (current_view_.version() < view.version()) { log_info << "PC protocol upgrade " << current_view_.version() << " -> " << view.version(); } else if (current_view_.version() > view.version()) { log_info << "PC protocol downgrade " << current_view_.version() << " -> " << view.version(); } current_view_ = view; views_.push_back(current_view_); if (current_view_.is_empty() == true) { shift_to(S_NON_PRIM); deliver_view(); shift_to(S_CLOSED); } else { shift_to(S_STATES_EXCH); send_state(); } } void gcomm::pc::Proto::handle_view(const View& view) { // We accept only EVS TRANS and REG views if (view.type() != V_TRANS && view.type() != V_REG) { gu_throw_fatal << "Invalid view type"; } // Make sure that self exists in view if (view.is_empty() == false && view.is_member(uuid()) == false) { gu_throw_fatal << "Self not found from non empty view: " << view; } log_debug << self_id() << " " << view; if (view.type() == V_TRANS) { if (current_view_.type() == V_NONE) { handle_first_trans(view); } else { handle_trans(view); } } else { handle_reg(view); } } int gcomm::pc::Proto::cluster_weight() const { int total_weight(0); if (pc_view_.type() == V_PRIM) { for (NodeMap::const_iterator i(instances_.begin()); i != instances_.end(); ++i) { if (pc_view_.id() == i->second.last_prim()) { total_weight += i->second.weight(); } } } return total_weight; } // Validate state message agains local state void gcomm::pc::Proto::validate_state_msgs() const { // #622, #638 Compute max TO seq among states from prim SMMap prim_state_msgs; std::for_each(state_msgs_.begin(), state_msgs_.end(), SelectPrimOp(prim_state_msgs)); const int64_t max_to_seq(get_max_to_seq(prim_state_msgs)); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const UUID& msg_source_uuid(SMMap::key(i)); const Node& msg_source_state(SMMap::value(i).node(msg_source_uuid)); const NodeMap& msg_state_map(SMMap::value(i).node_map()); for 
(NodeMap::const_iterator si = msg_state_map.begin(); si != msg_state_map.end(); ++si) { const UUID& uuid(NodeMap::key(si)); const Node& msg_state(NodeMap::value(si)); const Node& local_state(NodeMap::value(instances_.find_checked(uuid))); if (prim() == true && msg_source_state.prim() == true && msg_state.prim() == true) { if (current_view_.is_member(uuid) == true) { // Msg source claims to come from prim view and this node // is in prim. All message prim view states must be equal // to local ones. if (msg_state.weight() == -1) { // backwards compatibility, ignore weight in state check gcomm_assert( msg_state.prim() == local_state.prim() && msg_state.last_seq() == local_state.last_seq() && msg_state.last_prim() == local_state.last_prim() && msg_state.to_seq() == local_state.to_seq()) << self_id() << " node " << uuid << " prim state message and local states not consistent:" << " msg node " << msg_state << " local state " << local_state; } else { gcomm_assert(msg_state == local_state) << self_id() << " node " << uuid << " prim state message and local states not consistent:" << " msg node " << msg_state << " local state " << local_state; } gcomm_assert(msg_state.to_seq() == max_to_seq) << self_id() << " node " << uuid << " to seq not consistent with local state:" << " max to seq " << max_to_seq << " msg state to seq " << msg_state.to_seq(); } } else if (prim() == true) { log_debug << self_id() << " node " << uuid << " from " << msg_state.last_prim() << " joining " << last_prim(); } else if (msg_state.prim() == true) { // @todo: Cross check with other state messages coming from prim log_debug << self_id() << " joining to " << msg_state.last_prim(); } } } } // @note This method is currently for sanity checking only. RTR is not // implemented yet. 
bool gcomm::pc::Proto::requires_rtr() const { bool ret = false; // Find maximum reported to_seq const int64_t max_to_seq(get_max_to_seq(state_msgs_)); for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { NodeMap::const_iterator ii( SMMap::value(i).node_map().find_checked(SMMap::key(i))); const Node& inst = NodeMap::value(ii); const int64_t to_seq = inst.to_seq(); const ViewId last_prim = inst.last_prim(); if (to_seq != -1 && to_seq != max_to_seq && last_prim.type() != V_NON_PRIM) { log_debug << self_id() << " RTR is needed: " << to_seq << " / " << last_prim; ret = true; } } return ret; } void gcomm::pc::Proto::cleanup_instances() { gcomm_assert(state() == S_PRIM); gcomm_assert(current_view_.type() == V_REG); NodeMap::iterator i, i_next; for (i = instances_.begin(); i != instances_.end(); i = i_next) { i_next = i, ++i_next; const UUID& uuid(NodeMap::key(i)); if (current_view_.is_member(uuid) == false) { log_debug << self_id() << " cleaning up instance " << uuid; instances_.erase(i); } else { // Clear unknow status from nodes in current view here. // New PC has been installed and if paritioning happens, // we either know for sure that the other partitioned component ends // up in non-prim, or in other case we have valid PC view to // deal with in case of remerge. 
NodeMap::value(i).set_un(false); } } } bool gcomm::pc::Proto::is_prim() const { bool prim(false); ViewId last_prim(V_NON_PRIM); int64_t to_seq(-1); // Check if any of instances claims to come from prim view for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const Node& state(SMMap::value(i).node(SMMap::key(i))); if (state.prim() == true) { log_info << "Node " << SMMap::key(i) << " state prim"; prim = true; last_prim = state.last_prim(); to_seq = state.to_seq(); break; } } // Verify that all members are either coming from the same prim // view or from non-prim for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const Node& state(SMMap::value(i).node(SMMap::key(i))); if (state.prim() == true) { if (state.last_prim() != last_prim) { gu_throw_fatal << self_id() << " last prims not consistent"; } if (state.to_seq() != to_seq) { gu_throw_fatal << self_id() << " TO seqs not consistent"; } } else { log_debug << "Non-prim " << SMMap::key(i) <<" from " << state.last_prim() << " joining prim"; } } // No members coming from prim view, check if last known prim // view can be recovered (majority of members from last prim alive) if (prim == false) { gcomm_assert(last_prim == ViewId(V_NON_PRIM)) << last_prim << " != " << ViewId(V_NON_PRIM); // First determine if there are any nodes still in unknown state. 
std::set un; for (NodeMap::const_iterator i(instances_.begin()); i != instances_.end(); ++i) { if (NodeMap::value(i).un() == true && current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { un.insert(NodeMap::key(i)); } } if (un.empty() == false) { std::ostringstream oss; std::copy(un.begin(), un.end(), std::ostream_iterator(oss, " ")); log_info << "Nodes " << oss.str() << "are still in unknown state, " << "unable to rebootstrap new prim"; return false; } // Collect last prim members and evicted from state messages MultiMap last_prim_uuids; std::set evicted; for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { for (NodeMap::const_iterator j = SMMap::value(i).node_map().begin(); j != SMMap::value(i).node_map().end(); ++j) { const UUID& uuid(NodeMap::key(j)); const Node& inst(NodeMap::value(j)); if (inst.last_prim().type() != V_NON_PRIM && std::find::iterator, std::pair >( last_prim_uuids.begin(), last_prim_uuids.end(), std::make_pair(inst.last_prim(), uuid)) == last_prim_uuids.end()) { last_prim_uuids.insert(std::make_pair(inst.last_prim(), uuid)); } if (inst.evicted() == true) { evicted.insert(uuid); } } } if (last_prim_uuids.empty() == true) { log_info << "No nodes coming from primary view, " << "primary view is not possible"; return false; } // Construct greatest view set of UUIDs ignoring evicted ones std::set greatest_view; // Get range of UUIDs in greatest views const ViewId greatest_view_id(last_prim_uuids.rbegin()->first); std::pair::const_iterator, MultiMap::const_iterator> gvi = last_prim_uuids.equal_range(greatest_view_id); // Iterate over range and insert into greatest view if not evicted for (MultiMap::const_iterator i = gvi.first; i != gvi.second; ++i) { if (evicted.find(MultiMap::value(i)) == evicted.end()) { std::pair::iterator, bool> iret = greatest_view.insert( MultiMap::value(i)); // Assert that inserted UUID was unique gcomm_assert(iret.second == true); } } log_debug << self_id() << " 
greatest view id " << greatest_view_id; // Compute list of present view members std::set present; for (NodeList::const_iterator i = current_view_.members().begin(); i != current_view_.members().end(); ++i) { present.insert(NodeList::key(i)); } // Compute intersection of present and greatest view. If the // intersection size is the same as greatest view size, // it is safe to rebootstrap PC. std::set intersection; set_intersection(greatest_view.begin(), greatest_view.end(), present.begin(), present.end(), inserter(intersection, intersection.begin()), UUID_fixed_part_cmp_intersection); log_debug << self_id() << " intersection size " << intersection.size() << " greatest view size " << greatest_view.size(); if (intersection.size() == greatest_view.size()) { log_info << "re-bootstrapping prim from partitioned components"; prim = true; } } return prim; } void gcomm::pc::Proto::handle_state(const Message& msg, const UUID& source) { gcomm_assert(msg.type() == Message::PC_T_STATE); gcomm_assert(state() == S_STATES_EXCH); gcomm_assert(state_msgs_.size() < current_view_.members().size()); log_debug << self_id() << " handle state from " << source << " " << msg; // Early check for possibly conflicting primary components. The one // with greater view id may continue (as it probably has been around // for longer timer). However, this should be configurable policy. if (prim() == true) { const Node& si(NodeMap::value(msg.node_map().find(source))); if (si.prim() == true && si.last_prim() != last_prim()) { log_warn << self_id() << " conflicting prims: my prim: " << last_prim() << " other prim: " << si.last_prim(); if ((npvo_ == true && last_prim() < si.last_prim()) || (npvo_ == false && last_prim() > si.last_prim())) { log_warn << self_id() << " discarding other prim view: " << (npvo_ == true ? "newer" : "older" ) << " overrides"; return; } else { gu_throw_fatal << self_id() << " aborting due to conflicting prims: " << (npvo_ == true ? 
"newer" : "older" ) << " overrides"; } } } state_msgs_.insert_unique(std::make_pair(source, msg)); if (state_msgs_.size() == current_view_.members().size()) { // Insert states from previously unseen nodes into local state map for (SMMap::const_iterator i = state_msgs_.begin(); i != state_msgs_.end(); ++i) { const NodeMap& sm_im(SMMap::value(i).node_map()); for (NodeMap::const_iterator j = sm_im.begin(); j != sm_im.end(); ++j) { const UUID& sm_uuid(NodeMap::key(j)); const Node& sm_node(NodeMap::value(j)); NodeMap::iterator local_node_i(instances_.find(sm_uuid)); if (local_node_i == instances_.end()) { const Node& sm_state(NodeMap::value(j)); instances_.insert_unique(std::make_pair(sm_uuid, sm_state)); } else { Node& local_node(NodeMap::value(local_node_i)); if (local_node.weight() == -1 && sm_node.weight() != -1) { // backwards compatibility: override weight for // instances which have been reported by old nodes // but have weights associated anyway local_node.set_weight(sm_node.weight()); } else if (local_node.weight() != sm_node.weight() && SMMap::key(i) == NodeMap::key(local_node_i)) { log_warn << self_id() << "overriding reported weight for " << NodeMap::key(local_node_i); local_node.set_weight(sm_node.weight()); } if (prim() == false && sm_node.un() == true && // note #92 local_node_i != self_i_) { // If coming from non-prim, set local instance status // to unknown if any of the state messages has it // marked unknown. If coming from prim, there is // no need to set this as it is known if the node // corresponding to local instance is in primary. 
local_node.set_un(true); } } } } // Validate that all state messages are consistent before proceeding gu_trace(validate_state_msgs()); if (is_prim() == true) { // @note Requires RTR does not actually have effect, but let it // be for debugging purposes until a while (void)requires_rtr(); shift_to(S_INSTALL); if (current_view_.members().find(uuid()) == current_view_.members().begin()) { send_install(false); } } else { // #571 Deliver NON-PRIM views in all cases. shift_to(S_NON_PRIM); deliver_view(); } } } void gcomm::pc::Proto::handle_install(const Message& msg, const UUID& source) { if (state() == S_PRIM) { if ((msg.flags() & Message::F_WEIGHT_CHANGE) == 0) { log_warn << "non weight changing install in S_PRIM: " << msg; } else { NodeMap::iterator local_i(instances_.find(source)); const Node& msg_n(msg.node(source)); log_info << self_id() << " changing node " << source << " weight (reg) " << NodeMap::value(local_i).weight() << " -> " << msg_n.weight(); NodeMap::value(local_i).set_weight(msg_n.weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(msg_n.weight())); } } return; } else if (state() == S_TRANS) { handle_trans_install(msg, source); return; } gcomm_assert(msg.type() == Message::PC_T_INSTALL); gcomm_assert(state() == S_INSTALL || state() == S_NON_PRIM); if ((msg.flags() & Message::F_BOOTSTRAP) == 0) { log_debug << self_id() << " handle install from " << source << " " << msg; } else { log_debug << self_id() << " handle bootstrap install from " << source << " " << msg; if (state() == S_INSTALL) { log_info << "ignoring bootstrap install in " << to_string(state()) << " state"; return; } } // Validate own state NodeMap::const_iterator mi(msg.node_map().find_checked(uuid())); const Node& m_state(NodeMap::value(mi)); if (m_state.weight() == -1) { // backwards compatibility, ignore weight in state check const Node& self_state(NodeMap::value(self_i_)); if ((m_state.prim() == self_state.prim() && m_state.last_seq() == self_state.last_seq() 
&& m_state.last_prim() == self_state.last_prim() && m_state.to_seq() == self_state.to_seq()) == false) { gu_throw_fatal << self_id() << "Install message self state does not match, " << "message state: " << m_state << ", local state: " << NodeMap::value(self_i_); } } else { if (m_state != NodeMap::value(self_i_)) { gu_throw_fatal << self_id() << "Install message self state does not match, " << "message state: " << m_state << ", local state: " << NodeMap::value(self_i_); } } // Set TO seqno according to install message int64_t to_seq(-1); bool prim_found(false); for (mi = msg.node_map().begin(); mi != msg.node_map().end(); ++mi) { const Node& m_state = NodeMap::value(mi); // check that all TO seqs coming from prim are same if (m_state.prim() == true && to_seq != -1) { if (m_state.to_seq() != to_seq) { gu_throw_fatal << "Install message TO seqnos inconsistent"; } } if (m_state.prim() == true) { prim_found = true; to_seq = std::max(to_seq, m_state.to_seq()); } } if (prim_found == false) { // #277 // prim comp was restored from non-prims, find out max known TO seq for (mi = msg.node_map().begin(); mi != msg.node_map().end(); ++mi) { const Node& m_state = NodeMap::value(mi); to_seq = std::max(to_seq, m_state.to_seq()); } log_debug << "assigning TO seq to " << to_seq << " after restoring prim"; } log_debug << self_id() << " setting TO seq to " << to_seq; set_to_seq(to_seq); shift_to(S_PRIM); deliver_view(msg.flags() & Message::F_BOOTSTRAP); cleanup_instances(); } namespace { class ViewUUIDLT { public: bool operator()(const gcomm::NodeList::value_type& a, const gcomm::NodeList::value_type& b) const { return (a.first < b.first); } }; } // When delivering install message in trans view quorum has to be re-evaluated // as the partitioned component may have installed prim view due to higher // weight. To do this, we construct pc view that would have been installed // if install message was delivered in reg view and make quorum computation // against it. 
// // It is not actually known if partitioned component installed new PC, so // we mark partitioned nodes states as unknown. This is to provide deterministic // way to prevent automatic rebootstrapping of PC if some of the seen nodes // is in unknown state. void gcomm::pc::Proto::handle_trans_install(const Message& msg, const UUID& source) { gcomm_assert(msg.type() == Message::PC_T_INSTALL); gcomm_assert(state() == S_TRANS); gcomm_assert(current_view_.type() == V_TRANS); if ((msg.flags() & Message::F_BOOTSTRAP) != 0) { log_info << "Dropping bootstrap install in TRANS state"; return; } gcomm_assert(have_quorum(current_view_, pc_view_) == true); if ((msg.flags() & Message::F_WEIGHT_CHANGE) != 0) { NodeList nl; nl.insert(current_view_.members().begin(), current_view_.members().end()); nl.insert(current_view_.left().begin(), current_view_.left().end()); if (std::includes(nl.begin(), nl.end(), pc_view_.members().begin(), pc_view_.members().end(), ViewUUIDLT()) == false) { // Weight changing install message delivered in trans view // and previous pc view has partitioned. // // Need to be very conservative: We don't know what happened to // weight change message in partitioned component, so it may not be // safe to do quorum calculation. Shift to non-prim and // wait until partitioned component comes back (or prim is // rebootstrapped). // // It would be possible to do more fine grained decisions // based on the source of the message, but to keep things simple // always go to non-prim, this is very cornerish case after all. 
log_info << "Weight changing trans install leads to non-prim"; mark_non_prim(); deliver_view(); for (NodeMap::const_iterator i(msg.node_map().begin()); i != msg.node_map().end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { NodeMap::iterator local_i(instances_.find(NodeMap::key(i))); if (local_i == instances_.end()) { log_warn << "Node " << NodeMap::key(i) << " not found from instances"; } else { if (NodeMap::key(i) == source) { NodeMap::value(local_i).set_weight( NodeMap::value(i).weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(NodeMap::value(i).weight())); } } NodeMap::value(local_i).set_un(true); } } } } else { NodeMap::iterator local_i(instances_.find(source)); const Node& msg_n(msg.node(source)); log_info << self_id() << " changing node " << source << " weight (trans) " << NodeMap::value(local_i).weight() << " -> " << msg_n.weight(); NodeMap::value(local_i).set_weight(msg_n.weight()); if (source == uuid()) { conf_.set(gcomm::Conf::PcWeight, gu::to_string(msg_n.weight())); } } } else { View new_pc_view(current_view_.version(), ViewId(V_PRIM, current_view_.id())); for (NodeMap::iterator i(instances_.begin()); i != instances_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); NodeMap::const_iterator ni(msg.node_map().find(uuid)); if (ni != msg.node_map().end()) { new_pc_view.add_member(uuid, 0); } } if (have_quorum(current_view_, new_pc_view) == false || pc_view_.type() == V_NON_PRIM) { log_info << "Trans install leads to non-prim"; mark_non_prim(); deliver_view(); // Mark all nodes in install msg node map but not in current // view with unknown status. It is not known if they delivered // install message in reg view and so formed new PC. 
for (NodeMap::const_iterator i(msg.node_map().begin()); i != msg.node_map().end(); ++i) { if (current_view_.members().find(NodeMap::key(i)) == current_view_.members().end()) { NodeMap::iterator local_i(instances_.find(NodeMap::key(i))); if (local_i == instances_.end()) { log_warn << "Node " << NodeMap::key(i) << " not found from instances"; } else { NodeMap::value(local_i).set_un(true); } } } } } } void gcomm::pc::Proto::handle_user(const Message& msg, const Datagram& dg, const ProtoUpMeta& um) { int64_t curr_to_seq(-1); if (prim() == true) { if (um.order() == O_SAFE) { set_to_seq(to_seq() + 1); curr_to_seq = to_seq(); } } else if (current_view_.members().find(um.source()) == current_view_.members().end()) { gcomm_assert(current_view_.type() == V_TRANS); // log_debug << self_id() // << " dropping message from out of view source in non-prim"; return; } if (um.order() == O_SAFE) { Node& state(NodeMap::value(instances_.find_checked(um.source()))); if (state.last_seq() + 1 != msg.seq()) { gu_throw_fatal << "gap in message sequence: source=" << um.source() << " expected_seq=" << state.last_seq() + 1 << " seq=" << msg.seq(); } state.set_last_seq(msg.seq()); } Datagram up_dg(dg, dg.offset() + msg.serial_size()); gu_trace(send_up(up_dg, ProtoUpMeta(um.source(), pc_view_.id(), 0, um.user_type(), um.order(), curr_to_seq))); } void gcomm::pc::Proto::handle_msg(const Message& msg, const Datagram& rb, const ProtoUpMeta& um) { // EVS provides send view delivery, so this assertion // should always hold. 
assert(msg.version() == current_view_.version()); enum Verdict { ACCEPT, DROP, FAIL }; static const Verdict verdicts[S_MAX][Message::PC_T_MAX] = { // Msg types // NONE, STATE, INSTALL, USER { FAIL, FAIL, FAIL, FAIL }, // Closed { FAIL, ACCEPT, FAIL, FAIL }, // States exch { FAIL, FAIL, ACCEPT, FAIL }, // INSTALL { FAIL, FAIL, ACCEPT, ACCEPT }, // PRIM { FAIL, DROP, ACCEPT, ACCEPT }, // TRANS { FAIL, ACCEPT, ACCEPT, ACCEPT } // NON-PRIM }; Message::Type msg_type(msg.type()); Verdict verdict (verdicts[state()][msg.type()]); if (verdict == FAIL) { gu_throw_fatal << "Invalid input, message " << msg.to_string() << " in state " << to_string(state()); } else if (verdict == DROP) { log_debug << "Dropping input, message " << msg.to_string() << " in state " << to_string(state()); return; } switch (msg_type) { case Message::PC_T_STATE: gu_trace(handle_state(msg, um.source())); break; case Message::PC_T_INSTALL: gu_trace(handle_install(msg, um.source())); { gu::Lock lock(sync_param_mutex_); if (param_sync_set_ && (um.source() == uuid())) { param_sync_set_ = false; sync_param_cond_.signal(); } } break; case Message::PC_T_USER: gu_trace(handle_user(msg, rb, um)); break; default: gu_throw_fatal << "Invalid message"; } } void gcomm::pc::Proto::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view() == true) { handle_view(um.view()); } else { Message msg; const gu::byte_t* b(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); try { (void)msg.unserialize(b, available, 0); } catch (gu::Exception& e) { switch (e.get_errno()) { case EPROTONOSUPPORT: if (prim() == false) { gu_throw_fatal << e.what() << " terminating"; } else { log_warn << "unknown/unsupported protocol version: " << msg.version() << " dropping message"; return; } break; default: GU_TRACE(e); throw; } } if (checksum_ == true && msg.flags() & Message::F_CRC16) { test_checksum(msg, rb, rb.offset()); } try { handle_msg(msg, rb, um); } catch (gu::Exception& e) { log_error << 
"caught exception in PC, state dump to stderr follows:"; std::cerr << *this << std::endl; throw; } } } int gcomm::pc::Proto::handle_down(Datagram& dg, const ProtoDownMeta& dm) { switch (state()) { case S_CLOSED: case S_NON_PRIM: // Not connected to primary component return ENOTCONN; case S_STATES_EXCH: case S_INSTALL: case S_TRANS: // Transient error return EAGAIN; case S_PRIM: // Allowed to send, fall through break; case S_MAX: gu_throw_fatal << "invalid state " << state(); } if (gu_unlikely(dg.len() > mtu())) { return EMSGSIZE; } uint32_t seq(dm.order() == O_SAFE ? last_sent_seq_ + 1 : last_sent_seq_); UserMessage um(current_view_.version(), seq); push_header(um, dg); if (checksum_ == true) { checksum(um, dg); } int ret = send_down(dg, dm); if (ret == 0) { last_sent_seq_ = seq; } else if (ret != EAGAIN) { log_warn << "Got unexpected error code from send in " "pc::Proto::handle_down(): " << ret; } pop_header(um, dg); return ret; } void gcomm::pc::Proto::sync_param() { gu::Lock lock(sync_param_mutex_); while(param_sync_set_) { lock.wait(sync_param_cond_); } } bool gcomm::pc::Proto::set_param(const std::string& key, const std::string& value, Protolay::sync_param_cb_t& sync_param_cb) { bool ret; if (key == gcomm::Conf::PcIgnoreSb) { ignore_sb_ = gu::from_string(value); conf_.set(gcomm::Conf::PcIgnoreSb, value); return true; } else if (key == gcomm::Conf::PcIgnoreQuorum) { ignore_quorum_ = gu::from_string(value); conf_.set(gcomm::Conf::PcIgnoreQuorum, value); return true; } else if (key == gcomm::Conf::PcBootstrap) { if (state() != S_NON_PRIM) { log_info << "ignoring '" << key << "' in state " << to_string(state()); } else { ret = send_install(true); if (ret != 0) gu_throw_error(ret); } return true; } else if (key == gcomm::Conf::PcWeight) { if (state() != S_PRIM) { gu_throw_error(EAGAIN) << "can't change weightm: state not S_PRIM, retry again"; } else { int w(gu::from_string(value)); if (w < 0 || w > 255) { gu_throw_error(ERANGE) << "value " << w << " for '" << key 
<< "' out of range"; } weight_ = w; { sync_param_cb = boost::bind(&gcomm::pc::Proto::sync_param, this); gu::Lock lock(sync_param_mutex_); param_sync_set_ = true; } ret = send_install(false, weight_); if (ret != 0) { gu::Lock lock(sync_param_mutex_); param_sync_set_ = false; gu_throw_error(ret); } return true; } } else if (key == Conf::PcChecksum || key == Conf::PcAnnounceTimeout || key == Conf::PcLinger || key == Conf::PcNpvo || key == Conf::PcWaitPrim || key == Conf::PcWaitPrimTimeout || key == Conf::PcRecovery) { gu_throw_error(EPERM) << "can't change value for '" << key << "' during runtime"; } return false; } galera-4-26.4.25/gcomm/src/evs_seqno.hpp000644 000164 177776 00000003617 15107057155 021127 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ #ifndef EVS_SEQNO_HPP #define EVS_SEQNO_HPP #include "gcomm/types.hpp" #include "gu_serialize.hpp" #include #include namespace gcomm { namespace evs { typedef int64_t seqno_t; class Range; std::ostream& operator<<(std::ostream&, const Range&); } } /*! * */ class gcomm::evs::Range { public: Range(const seqno_t lu = -1, const seqno_t hs = -1) : lu_(lu), hs_(hs) {} seqno_t lu() const { return lu_; } seqno_t hs() const { return hs_; } void set_lu(const seqno_t s) { lu_ = s; } void set_hs(const seqno_t s) { hs_ = s; } size_t serialize(gu::byte_t* buf, size_t buflen, size_t offset) const { gu_trace(offset = gu::serialize8(lu_, buf, buflen, offset)); gu_trace(offset = gu::serialize8(hs_, buf, buflen, offset)); return offset; } size_t unserialize(const gu::byte_t* buf, size_t buflen, size_t offset) { gu_trace(offset = gu::unserialize8(buf, buflen, offset, lu_)); gu_trace(offset = gu::unserialize8(buf, buflen, offset, hs_)); return offset; } static size_t serial_size() { return 2 * sizeof(seqno_t); } bool operator==(const Range& cmp) const { return (lu_ == cmp.lu_ && hs_ == cmp.hs_); } /** * Return true if the range is empty. 
This is the case when * - The range is default constructed * - Lowest unseen is greater than highest seen. */ bool is_empty() const { return ((lu_ == -1 && hs_ == -1) || lu_ > hs_); } private: seqno_t lu_; /*!< Lowest unseen seqno */ seqno_t hs_; /*!< Highest seen seqno */ }; inline std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Range& r) { return (os << "[" << r.lu() << "," << r.hs() << "]"); } #endif // EVS_SEQNO_HPP galera-4-26.4.25/gcomm/src/pc_message.hpp000644 000164 177776 00000026174 15107057155 021236 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2012 Codership Oy */ #ifndef PC_MESSAGE_HPP #define PC_MESSAGE_HPP #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/uuid.hpp" #include "gcomm/map.hpp" #include "gu_serialize.hpp" #include "protocol_version.hpp" #include namespace gcomm { namespace pc { class Node; class NodeMap; class Message; class UserMessage; class StateMessage; class InstallMessage; std::ostream& operator<<(std::ostream&, const Node&); std::ostream& operator<<(std::ostream&, const Message&); bool operator==(const Message&, const Message&); } } class gcomm::pc::Node { public: enum Flags { F_PRIM = 0x1, F_WEIGHT = 0x2, F_UN = 0x4, F_EVICTED = 0x8 }; Node(const bool prim = false, const bool un = false, const bool evicted = false, const uint32_t last_seq = std::numeric_limits::max(), const ViewId& last_prim = ViewId(V_NON_PRIM), const int64_t to_seq = -1, const int weight = -1, const SegmentId segment = 0) : prim_ (prim ), un_ (un ), evicted_ (evicted ), last_seq_ (last_seq ), last_prim_ (last_prim), to_seq_ (to_seq ), weight_ (weight), segment_ (segment) { } void set_prim (const bool val) { prim_ = val ; } void set_un (const bool un) { un_ = un ; } void set_evicted (const bool evicted) { evicted_ = evicted ; } void set_last_seq (const uint32_t seq) { last_seq_ = seq ; } void set_last_prim (const ViewId& last_prim) { last_prim_ = last_prim; } void set_to_seq (const uint64_t seq) { to_seq_ 
= seq ; } void set_weight (const int weight) { weight_ = weight ; } void set_segment (const SegmentId segment) { segment_ = segment ; } bool prim() const { return prim_ ; } bool un() const { return un_ ; } bool evicted() const { return evicted_ ; } uint32_t last_seq() const { return last_seq_ ; } const ViewId& last_prim() const { return last_prim_; } int64_t to_seq() const { return to_seq_ ; } int weight() const { return weight_ ; } SegmentId segment() const { return segment_ ; } // // Serialized header // 0 1 2 3 // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // | flags | segment id | weight ¡ // size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off = offset; uint32_t header; gu_trace (off = gu::unserialize4(buf, buflen, off, header)); prim_ = header & F_PRIM; un_ = header & F_UN; if (header & F_WEIGHT) { weight_ = header >> 24; } else { weight_ = -1; } evicted_ = header & F_EVICTED; segment_ = (header >> 16) & 0xff; gu_trace (off = gu::unserialize4(buf, buflen, off, last_seq_)); gu_trace (off = last_prim_.unserialize(buf, buflen, off)); gu_trace (off = gu::unserialize8(buf, buflen, off, to_seq_)); return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off = offset; uint32_t header = 0; header |= prim_ ? F_PRIM : 0; header |= un_ ? F_UN : 0; if (weight_ >= 0) { header |= F_WEIGHT; header |= weight_ << 24; } header |= evicted_ ? 
F_EVICTED : 0; header |= static_cast(segment_) << 16; gu_trace (off = gu::serialize4(header, buf, buflen, off)); gu_trace (off = gu::serialize4(last_seq_, buf, buflen, off)); gu_trace (off = last_prim_.serialize(buf, buflen, off)); gu_trace (off = gu::serialize8(to_seq_, buf, buflen, off)); assert (serial_size() == (off - offset)); return off; } static size_t serial_size() { Node* node(reinterpret_cast(0)); // header return (sizeof(uint32_t) + sizeof(node->last_seq_) + ViewId::serial_size() + sizeof(node->to_seq_)); } bool operator==(const Node& cmp) const { return (prim() == cmp.prim() && un() == cmp.un() && last_seq() == cmp.last_seq() && last_prim() == cmp.last_prim() && to_seq() == cmp.to_seq() && weight() == cmp.weight() && segment() == cmp.segment() ); } std::string to_string() const { std::ostringstream ret; ret << "prim=" << prim_ << ",un=" << un_ << ",last_seq=" << last_seq_ << ",last_prim=" << last_prim_ << ",to_seq=" << to_seq_ << ",weight=" << weight_ << ",segment=" << static_cast(segment_); return ret.str(); } private: bool prim_; // Is node in prim comp bool un_; // The prim status of the node is unknown bool evicted_; // Node has been evicted permanently from the group uint32_t last_seq_; // Last seen message seq from the node ViewId last_prim_; // Last known prim comp view id for the node int64_t to_seq_; // Last known TO seq for the node int weight_; // Node weight SegmentId segment_; }; inline std::ostream& gcomm::pc::operator<<(std::ostream& os, const Node& n) { return (os << n.to_string()); } class gcomm::pc::NodeMap : public Map { }; class gcomm::pc::Message { public: enum Type {PC_T_NONE, PC_T_STATE, PC_T_INSTALL, PC_T_USER, PC_T_MAX}; enum { F_CRC16 = 0x1, F_BOOTSTRAP = 0x2, F_WEIGHT_CHANGE = 0x4 }; static const char* to_string(Type t) { static const char* str[PC_T_MAX] = { "NONE", "STATE", "INSTALL", "USER" }; if (t < PC_T_MAX) return str[t]; return "unknown"; } Message(const int version = -1, const Type type = PC_T_NONE, const uint32_t seq 
= 0, const NodeMap& node_map = NodeMap()) : version_ (version ), flags_ (0 ), type_ (type ), seq_ (seq ), crc16_ (0 ), node_map_(node_map) { // Note: // PC message wire format has room only for version numbers up to 15. // At version 15 (latest) the wire format must change to match // 8 bit version width of EVS. assert(version < 15); } Message(const Message& msg) = default; Message& operator=(const Message&) = default; virtual ~Message() { } int version() const { return version_; } Type type() const { return type_; } uint32_t seq() const { return seq_; } void flags(int flags) { flags_ = flags; } int flags() const { return flags_; } void checksum(uint16_t crc16, bool flag) { crc16_ = crc16; if (flag == true) { flags_ |= F_CRC16; } else { flags_ &= ~F_CRC16; } } uint16_t checksum() const { return crc16_; } const NodeMap& node_map() const { return node_map_; } NodeMap& node_map() { return node_map_; } const Node& node(const UUID& uuid) const { return NodeMap::value(node_map_.find_checked(uuid)); } Node& node(const UUID& uuid) { return NodeMap::value(node_map_.find_checked(uuid)); } size_t unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; uint32_t b; node_map_.clear(); gu_trace (off = gu::unserialize4(buf, buflen, offset, b)); version_ = b & 0x0f; if (version_ > GCOMM_PROTOCOL_MAX_VERSION) gu_throw_error (EPROTONOSUPPORT) << "Unsupported protocol varsion: " << version_; flags_ = (b & 0xf0) >> 4; type_ = static_cast((b >> 8) & 0xff); if (type_ <= PC_T_NONE || type_ >= PC_T_MAX) gu_throw_error (EINVAL) << "Bad type value: " << type_; crc16_ = ((b >> 16) & 0xffff); gu_trace (off = gu::unserialize4(buf, buflen, off, seq_)); if (type_ == PC_T_STATE || type_ == PC_T_INSTALL) { gu_trace (off = node_map_.unserialize(buf, buflen, off)); } return off; } size_t serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; uint32_t b; b = crc16_; b <<= 8; b |= type_ & 0xff; b <<= 8; b |= version_ & 0x0f; b |= 
(flags_ << 4) & 0xf0; gu_trace (off = gu::serialize4(b, buf, buflen, offset)); gu_trace (off = gu::serialize4(seq_, buf, buflen, off)); if (type_ == PC_T_STATE || type_ == PC_T_INSTALL) { gu_trace (off = node_map_.serialize(buf, buflen, off)); } assert (serial_size() == (off - offset)); return off; } size_t serial_size() const { // header return (sizeof(uint32_t) + sizeof(seq_) + (type_ == PC_T_STATE || type_ == PC_T_INSTALL ? node_map_.serial_size() : 0)); } std::string to_string() const { std::ostringstream ret; ret << "pcmsg{ type=" << to_string(type_) << ", seq=" << seq_; ret << ", flags=" << std::setw(2) << std::hex << flags_; ret << ", node_map {" << node_map() << "}"; ret << '}'; return ret.str(); } private: int version_; // Message version int flags_; // Flags Type type_; // Message type uint32_t seq_; // Message seqno uint16_t crc16_; // 16-bit crc NodeMap node_map_; // Message node map }; inline std::ostream& gcomm::pc::operator<<(std::ostream& os, const Message& m) { return (os << m.to_string()); } class gcomm::pc::StateMessage : public Message { public: StateMessage(int version) : Message(version, Message::PC_T_STATE, 0) {} }; class gcomm::pc::InstallMessage : public Message { public: InstallMessage(int version) : Message(version, Message::PC_T_INSTALL, 0) {} }; class gcomm::pc::UserMessage : public Message { public: UserMessage(int version, uint32_t seq) : Message(version, Message::PC_T_USER, seq) {} }; inline bool gcomm::pc::operator==(const Message& a, const Message& b) { return (a.version() == b.version() && a.checksum() == b.checksum() && a.type() == b.type() && a.seq() == b.seq() && a.node_map() == b.node_map()); } #endif // PC_MESSAGE_HPP galera-4-26.4.25/gcomm/src/evs_node.hpp000644 000164 177776 00000011627 15107057155 020727 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ #ifndef EVS_NODE_HPP #define EVS_NODE_HPP #include "evs_message2.hpp" #include "gcomm/map.hpp" #include "gcomm/uuid.hpp" #include 
"gu_datetime.hpp" #include "gu_logger.hpp" #include #include namespace gcomm { namespace evs { class Node; class NodeMap; std::ostream& operator<<(std::ostream&, const Node&); class InspectNode; class OperationalSelect; class Proto; } } class gcomm::evs::Node { public: static const size_t invalid_index; Node(const Proto& proto) : proto_ (proto), index_ (invalid_index), operational_ (true), suspected_ (false), inactive_ (false), committed_ (false), installed_ (false), join_message_ (0), leave_message_ (0), delayed_list_message_(0), tstamp_ (gu::datetime::Date::monotonic()), seen_tstamp_ (tstamp_), last_requested_range_tstamp_(), last_requested_range_(), fifo_seq_ (-1), segment_ (0) {} Node(const Node& n); ~Node(); void set_index(const size_t idx) { index_ = idx; } size_t index() const { return index_; } void set_operational(const bool op) { gcomm_assert(op == false); operational_ = op; } bool operational() const { return operational_; } bool suspected() const { return suspected_; } void set_committed(const bool comm) { committed_ = comm; } bool committed() const { return committed_; } void set_installed(const bool inst) { installed_ = inst; } bool installed() const { return installed_; } void set_join_message(const JoinMessage* msg); const JoinMessage* join_message() const { return join_message_; } void set_leave_message(const LeaveMessage* msg); const LeaveMessage* leave_message() const { return leave_message_; } void set_delayed_list_message(const DelayedListMessage* msg); const DelayedListMessage *delayed_list_message() const { return delayed_list_message_; } void set_tstamp(const gu::datetime::Date& t) { tstamp_ = t; } const gu::datetime::Date& tstamp() const { return tstamp_; } void set_seen_tstamp(const gu::datetime::Date& t) { seen_tstamp_ = t; } const gu::datetime::Date& seen_tstamp() const { return seen_tstamp_; } void last_requested_range(const Range& range) { assert(range.is_empty() == false); last_requested_range_tstamp_ = 
gu::datetime::Date::monotonic(); last_requested_range_ = range; } gu::datetime::Date last_requested_range_tstamp() const { return last_requested_range_tstamp_; } const Range& last_requested_range() const { return last_requested_range_; } void set_fifo_seq(const int64_t seq) { fifo_seq_ = seq; } int64_t fifo_seq() const { return fifo_seq_; } SegmentId segment() const { return segment_; } bool is_inactive() const; bool is_suspected() const; private: void operator=(const Node&); friend class InspectNode; const Proto& proto_; // Index for input map size_t index_; // True if instance is considered to be operational (has produced messages) bool operational_; bool suspected_; bool inactive_; // True if it is known that the instance has committed to install message bool committed_; // True if it is known that the instance has installed current view bool installed_; // Last received JOIN message JoinMessage* join_message_; // Leave message LeaveMessage* leave_message_; // Delayed list message DelayedListMessage* delayed_list_message_; // Timestamp denoting the last time a message from node // advanced input map state or membership protocol. This is used // for determining if the node should become suspected/inactive. gu::datetime::Date tstamp_; // Timestamp denoting the time when the node was seen last time. // This is used to decide if the node should be considered delayed. gu::datetime::Date seen_tstamp_; // Last time the gap message requesting a message resend/recovery // was sent to this node. gu::datetime::Date last_requested_range_tstamp_; // Last requested (non-empty) range. 
Range last_requested_range_; int64_t fifo_seq_; SegmentId segment_; }; class gcomm::evs::NodeMap : public Map { }; class gcomm::evs::OperationalSelect { public: OperationalSelect(NodeMap& nm_) : nm(nm_) { } void operator()(const NodeMap::value_type& vt) const { if (NodeMap::value(vt).operational() == true) { nm.insert_unique(vt); } } private: NodeMap& nm; }; class gcomm::evs::InspectNode { public: void operator()(std::pair& p) const; }; #endif // EVS_NODE_HPP galera-4-26.4.25/gcomm/src/asio_udp.hpp000644 000164 177776 00000003356 15107057155 020730 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2017 Codership Oy */ #ifndef GCOMM_ASIO_UDP_HPP #define GCOMM_ASIO_UDP_HPP #include "socket.hpp" #include "asio_protonet.hpp" #include "gu_shared_ptr.hpp" #include #include "gu_disable_non_virtual_dtor.hpp" #include "gu_compiler.hpp" namespace gcomm { class AsioUdpSocket; class AsioProtonet; } class gcomm::AsioUdpSocket : public gcomm::Socket, public gu::AsioDatagramSocketHandler, public std::enable_shared_from_this { public: AsioUdpSocket(AsioProtonet& net, const gu::URI& uri); ~AsioUdpSocket(); // Socket interface virtual void connect(const gu::URI& uri) GALERA_OVERRIDE; virtual void close() GALERA_OVERRIDE; virtual void set_option(const std::string&, const std::string&) GALERA_OVERRIDE { /* not implemented */ } virtual int send(int segment, const Datagram& dg) GALERA_OVERRIDE; virtual void async_receive() GALERA_OVERRIDE; virtual size_t mtu() const GALERA_OVERRIDE; virtual std::string local_addr() const GALERA_OVERRIDE; virtual std::string remote_addr() const GALERA_OVERRIDE; virtual State state() const GALERA_OVERRIDE { return state_; } virtual SocketId id() const GALERA_OVERRIDE { return &socket_; } virtual SocketStats stats() const GALERA_OVERRIDE { return SocketStats(); } private: // AsioDatagramSocketHandler virtual void read_handler(gu::AsioDatagramSocket&, const gu::AsioErrorCode&, size_t) GALERA_OVERRIDE; AsioProtonet& net_; State state_; 
std::shared_ptr socket_; std::vector recv_buf_; }; #include "gu_enable_non_virtual_dtor.hpp" #endif // GCOMM_ASIO_UDP_HPP galera-4-26.4.25/gcomm/src/SConscript000644 000164 177776 00000002165 15107057155 020423 0ustar00jenkinsnogroup000000 000000 # Import('env') libgcomm_env = env.Clone() # Include paths libgcomm_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcomm/src ''')) libgcomm_env.Append(CXXFLAGS = ' -fno-strict-aliasing') libgcomm_sources = [ 'conf.cpp', 'defaults.cpp', 'datagram.cpp', 'evs_consensus.cpp', 'evs_input_map2.cpp', 'evs_message2.cpp', 'evs_node.cpp', 'evs_proto.cpp', 'gmcast.cpp', 'gmcast_proto.cpp', 'pc.cpp', 'pc_proto.cpp', 'protonet.cpp', 'protostack.cpp', 'transport.cpp', 'uuid.cpp', 'view.cpp', 'socket.cpp'] if '-DHAVE_ASIO_HPP' in libgcomm_env['CPPFLAGS']: # ASIO sources need to be built with relaxed C++ flags libgcomm_sources.extend([ 'asio_tcp.cpp', 'asio_udp.cpp', 'asio_protonet.cpp']) libgcomm_env.StaticLibrary('gcomm', libgcomm_sources) env.Append(LIBGALERA_OBJS = libgcomm_env.SharedObject(libgcomm_sources)) galera-4-26.4.25/gcomm/src/fair_send_queue.hpp000644 000164 177776 00000007710 15107057155 022261 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2019 Codership Oy // /** * Segmentation aware send queue implementation. * * In order to avoid the segment relay node of hogging all bandwidth * for bulk transfers, the send queue needs to be aware of segments. * FairSendQueue implements a queue which maintains separate queue * for each segment. Messages are read from queues in round robin. */ #ifndef GCOMM_FAIR_SEND_QUEUE_HPP #define GCOMM_FAIR_SEND_QUEUE_HPP #include "gcomm/datagram.hpp" #include #include namespace gcomm { class FairSendQueue { typedef std::map > queue_type; public: FairSendQueue() : current_segment_(-1) , last_pushed_segment_(-1) , queued_bytes_() , queue_() { } /* Push back datagram dg from segment. 
*/ void push_back(int segment, const gcomm::Datagram& dg) { assert(current_segment_ != -1 || empty()); assert(queued_bytes_ || empty()); std::deque& dq(queue_[segment]); dq.push_back(dg); if (current_segment_ == -1) { current_segment_ = segment; } last_pushed_segment_ = segment; queued_bytes_ += dg.len(); } /* Return reference to front datagram. */ const gcomm::Datagram& front() const { assert(current_segment_ != -1); queue_type::const_iterator i(queue_.find(current_segment_)); assert(i != queue_.end()); return i->second.front(); } /* Return reference to back datagram. */ const gcomm::Datagram& back() const { assert(last_pushed_segment_ != -1); queue_type::const_iterator i(queue_.find(last_pushed_segment_)); assert(i != queue_.end()); return i->second.back(); } /* Pop front element from the queue. */ void pop_front() { assert(current_segment_ != -1); assert(not queue_[current_segment_].empty()); std::deque& que(queue_[current_segment_]); assert(que.front().len() <= queued_bytes_); queued_bytes_ -= que.front().len(); que.pop_front(); current_segment_ = get_next_segment(); } /* Return true if queue is empty. */ bool empty() const { return (queued_bytes() == 0); } /* Return queue size. */ size_t size() const { size_t ret(0); for (queue_type::const_iterator i(queue_.begin()); i != queue_.end(); ++i) { ret += i->second.size(); } return ret; } size_t queued_bytes() const { return queued_bytes_; } /* Return number of queued messages for each segment. 
*/ std::vector > segments() const { std::vector > ret; for (queue_type::const_iterator i(queue_.begin()); i != queue_.end(); ++i) { ret.push_back(std::make_pair(i->first, i->second.size())); } return ret; } private: int get_next_segment() const { queue_type::const_iterator i(queue_.find(current_segment_)); assert(i != queue_.end()); do { // Increment and wrap around ++i; if (i == queue_.end()) i = queue_.begin(); if (not i->second.empty()) return i->first; } while (i->first != current_segment_); return -1; } int current_segment_; int last_pushed_segment_; size_t queued_bytes_; queue_type queue_; }; } #endif /* GCOMM_FAIR_SEND_QUEUE */ galera-4-26.4.25/gcomm/src/evs_node.cpp000644 000164 177776 00000006736 15107057155 020727 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ #include "evs_node.hpp" #include "evs_proto.hpp" #include "evs_message2.hpp" #include const size_t gcomm::evs::Node::invalid_index(std::numeric_limits::max()); std::ostream& gcomm::evs::operator<<(std::ostream& os, const gcomm::evs::Node& n) { os << "{"; os << "o=" << n.operational() << ","; os << "s=" << n.suspected() << ","; os << "i=" << n.installed() << ","; os << "fs=" << n.fifo_seq() << ","; if (n.join_message() != 0) { os << "jm=\n" << *n.join_message() << ",\n"; } if (n.leave_message() != 0) { os << "lm=\n" << *n.leave_message() << ",\n"; } os << "}"; return os; } gcomm::evs::Node::Node(const Node& n) : proto_ (n.proto_), index_ (n.index_), operational_ (n.operational_), suspected_ (n.suspected_), inactive_ (n.inactive_), committed_ (n.committed_), installed_ (n.installed_), join_message_ (n.join_message_ != 0 ? new JoinMessage(*n.join_message_) : 0), leave_message_ (n.leave_message_ != 0 ? new LeaveMessage(*n.leave_message_) : 0), delayed_list_message_ (n.delayed_list_message_ != 0 ? 
new DelayedListMessage(*n.delayed_list_message_) : 0), tstamp_ (n.tstamp_), seen_tstamp_ (n.seen_tstamp_), last_requested_range_tstamp_(), last_requested_range_(), fifo_seq_ (n.fifo_seq_), segment_ (n.segment_) { } gcomm::evs::Node::~Node() { delete join_message_; delete leave_message_; delete delayed_list_message_; } void gcomm::evs::Node::set_join_message(const JoinMessage* jm) { if (join_message_ != 0) { delete join_message_; } if (jm != 0) { join_message_ = new JoinMessage(*jm); } else { join_message_ = 0; } } void gcomm::evs::Node::set_leave_message(const LeaveMessage* lm) { if (leave_message_ != 0) { delete leave_message_; } if (lm != 0) { leave_message_ = new LeaveMessage(*lm); } else { leave_message_ = 0; } } void gcomm::evs::Node::set_delayed_list_message(const DelayedListMessage* elm) { if (delayed_list_message_ != 0) { delete delayed_list_message_; } delayed_list_message_ = (elm == 0 ? 0 : new DelayedListMessage(*elm)); } bool gcomm::evs::Node::is_suspected() const { return suspected_; } bool gcomm::evs::Node::is_inactive() const { return inactive_; } void gcomm::evs::InspectNode::operator()(std::pair& p) const { Node& node(p.second); gu::datetime::Date now(gu::datetime::Date::monotonic()); if (node.tstamp() + node.proto_.suspect_timeout_ < now) { if (node.suspected_ == false) { log_debug << "declaring node with index " << node.index_ << " suspected, timeout " << node.proto_.suspect_timeout_; } node.suspected_ = true; } else { node.suspected_ = false; } if (node.tstamp() + node.proto_.inactive_timeout_ < now) { if (node.inactive_ == false) { log_debug << "declaring node with index " << node.index_ << " inactive "; } node.inactive_ = true; } else { node.inactive_ = false; } } galera-4-26.4.25/gcomm/src/evs_input_map2.hpp000644 000164 177776 00000022433 15107057155 022055 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy * * $Id$ */ /*! * @file Input map for EVS messaging. 
Provides simple interface for * handling messages with different safety guarantees. * * @note When operating with iterators, note that evs::Message * accessed through iterator may have different sequence * number as it position dictates. Use sequence number * found from key part. * * @todo Fix issue in above note if feasible. */ #ifndef EVS_INPUT_MAP2_HPP #define EVS_INPUT_MAP2_HPP #include "evs_message2.hpp" #include "gcomm/map.hpp" #include "gcomm/datagram.hpp" #include namespace gcomm { /* Forward declarations */ class InputMapMsgKey; std::ostream& operator<<(std::ostream&, const InputMapMsgKey&); namespace evs { class InputMapMsg; std::ostream& operator<<(std::ostream&, const InputMapMsg&); class InputMapMsgIndex; class InputMapNode; std::ostream& operator<<(std::ostream&, const InputMapNode&); typedef std::vector InputMapNodeIndex; std::ostream& operator<<(std::ostream&, const InputMapNodeIndex&); class InputMap; std::ostream& operator<<(std::ostream&, const InputMap&); } } /* Internal msg representation */ class gcomm::InputMapMsgKey { public: InputMapMsgKey(const size_t index, const evs::seqno_t seq) : index_ (index), seq_ (seq) { } size_t index() const { return index_; } evs::seqno_t seq () const { return seq_; } bool operator<(const InputMapMsgKey& cmp) const { return (seq_ < cmp.seq_ || (seq_ == cmp.seq_ && index_ < cmp.index_)); } private: size_t const index_; evs::seqno_t const seq_; }; /* Internal message representation */ class gcomm::evs::InputMapMsg { public: InputMapMsg(const UserMessage& msg, const Datagram& rb) : msg_(msg), rb_ (rb) { } InputMapMsg(const InputMapMsg& m) : msg_(m.msg_), rb_ (m.rb_) { } ~InputMapMsg() { } const UserMessage& msg () const { return msg_; } const Datagram& rb () const { return rb_; } private: void operator=(const InputMapMsg&); UserMessage const msg_; Datagram rb_; }; #if defined(GALERA_USE_BOOST_POOL_ALLOC) #include class gcomm::evs::InputMapMsgIndex : public Map, boost::fast_pool_allocator< std::pair, 
boost::default_user_allocator_new_delete, boost::details::pool::null_mutex > > > {}; #else /* GALERA_USE_BOOST_POOL_ALLOC */ class gcomm::evs::InputMapMsgIndex : public Map {}; #endif /* GALERA_USE_BOOST_POOL_ALLOC */ /* Internal node representation */ class gcomm::evs::InputMapNode { public: InputMapNode() : idx_(), range_(0, -1), safe_seq_(-1) { } void set_range (const Range r) { range_ = r; } void set_safe_seq (const seqno_t s) { safe_seq_ = s; } void set_index (const size_t i) { idx_ = i; } Range range () const { return range_; } seqno_t safe_seq () const { return safe_seq_; } size_t index () const { return idx_; } private: size_t idx_; Range range_; seqno_t safe_seq_; }; /*! * Input map for messages. * */ class gcomm::evs::InputMap { public: /* Iterators exposed to user */ typedef InputMapMsgIndex::iterator iterator; typedef InputMapMsgIndex::const_iterator const_iterator; /*! * Default constructor. */ InputMap(); /*! * Default destructor. */ ~InputMap(); /*! * Get current value of aru_seq. * * @return Current value of aru_seq */ seqno_t aru_seq () const { return aru_seq_; } /*! * Get current value of safe_seq. * * @return Current value of safe_seq */ seqno_t safe_seq() const { return safe_seq_; } /*! * Set sequence number safe for node. * * @param uuid Node uuid * @param seq Sequence number to be set safe * * @throws FatalException if node was not found or sequence number * was not in the allowed range */ void set_safe_seq(const size_t uuid, const seqno_t seq); /*! * Get current value of safe_seq for node. * * @param uuid Node uuid * * @return Safe sequence number for node * * @throws FatalException if node was not found */ seqno_t safe_seq(const size_t uuid) const { return node_index_->at(uuid).safe_seq(); } /*! 
* Get current range parameter for node * * @param uuid Node uuid * * @return Range parameter for node * * @throws FatalException if node was not found */ Range range (const size_t uuid) const { return node_index_->at(uuid).range(); } seqno_t min_hs() const; seqno_t max_hs() const; /*! * Get iterator to the beginning of the input map * * @return Iterator pointing to the first element */ iterator begin() const { return msg_index_->begin(); } /*! * Get iterator next to the last element of the input map * * @return Iterator pointing past the last element */ iterator end () const { return msg_index_->end(); } /*! * Check if message pointed by iterator fulfills O_SAFE condition. * * @return True or false */ bool is_safe (iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); return (seq <= safe_seq_); } /*! * Check if message pointed by iterator fulfills O_AGREED condition. * * @return True or false */ bool is_agreed(iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); return (seq <= aru_seq_); } /*! * Check if message pointed by iterator fulfills O_FIFO condition. * * @return True or false */ bool is_fifo (iterator i) const { const seqno_t seq(InputMapMsgIndex::key(i).seq()); const InputMapNode& node((*node_index_)[ InputMapMsgIndex::key(i).index()]); return (node.range().lu() > seq); } /*! * Insert new message into input map. * * @param uuid Node uuid of the message source * @param msg EVS message * @param rb ReadBuf pointer associated to message * @param offset Offset to the beginning of the payload * * @return Range parameter of the node * * @throws FatalException if node not found or message sequence * number is out of allowed range */ Range insert(const size_t uuid, const UserMessage& msg, const Datagram& dg = Datagram()); /*! * Erase message pointed by iterator. Note that message may still * be recovered through recover() method as long as it does not * fulfill O_SAFE constraint. 
* * @param i Iterator * * @throws FatalException if iterator is not valid */ void erase(iterator i); /*! * Find message. * * @param uuid Message source node uuid * @param seq Message sequence numeber * * @return Iterator pointing to message or at end() if message was not found * * @throws FatalException if node was not found */ iterator find(const size_t uuid, const seqno_t seq) const; /*! * Recover message. * * @param uuid Message source node uuid * @param seq Message sequence number * * @return Iterator pointing to the message * * @throws FatalException if node or message was not found */ iterator recover(const size_t uuid, const seqno_t seq) const; /*! * Return list of ranges for missing messages. * * @param index Index of the node. * @param range Range to be scanned for missing messages. */ std::vector gap_range_list(size_t index, const Range& range) const; /*! * Reset the input map. * * @param nodes Number of nodes in the new configuration. */ void reset(const size_t nodes); /*! * Clear input map state. */ void clear(); private: friend std::ostream& operator<<(std::ostream&, const InputMap&); /* Non-copyable */ InputMap(const InputMap&); void operator=(const InputMap&); /*! * Update aru_seq value to represent current state. */ void update_aru(); /*! * Clean up recovery index. All messages up to safe_seq are removed. 
*/ void cleanup_recovery_index(); seqno_t safe_seq_; /*!< Safe seqno */ seqno_t aru_seq_; /*!< All received up to seqno */ InputMapNodeIndex* node_index_; /*!< Index of nodes */ InputMapMsgIndex* msg_index_; /*!< Index of messages */ InputMapMsgIndex* recovery_index_; /*!< Recovery index */ }; #endif // EVS_INPUT_MAP2_HPP galera-4-26.4.25/gcomm/src/asio_tcp.hpp000644 000164 177776 00000011010 15107057155 020710 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2024 Codership Oy */ #ifndef GCOMM_ASIO_TCP_HPP #define GCOMM_ASIO_TCP_HPP #include "socket.hpp" #include "asio_protonet.hpp" #include "fair_send_queue.hpp" #include "gu_array.hpp" #include "gu_shared_ptr.hpp" #include #include #include "gu_disable_non_virtual_dtor.hpp" #include "gu_compiler.hpp" /** * Configuration value denoting automatic buffer size adjustment for * socket.recv_buf_size and socket.send_buf_size. */ #define GCOMM_ASIO_AUTO_BUF_SIZE "auto" namespace gcomm { class AsioTcpSocket; class AsioTcpAcceptor; class AsioPostForSendHandler; } // TCP Socket implementation class gcomm::AsioTcpSocket : public gcomm::Socket, public gu::AsioSocketHandler, public std::enable_shared_from_this { public: AsioTcpSocket(AsioProtonet& net, const gu::URI& uri); AsioTcpSocket(AsioProtonet& net, const gu::URI& uri, const std::shared_ptr&); ~AsioTcpSocket(); void failed_handler(const gu::AsioErrorCode& ec, const std::string& func, int line); // Socket interface virtual void connect(const gu::URI& uri) GALERA_OVERRIDE; virtual void close() GALERA_OVERRIDE; virtual void set_option(const std::string& key, const std::string& val) GALERA_OVERRIDE; virtual int send(int segment, const Datagram& dg) GALERA_OVERRIDE; virtual void async_receive() GALERA_OVERRIDE; virtual size_t mtu() const GALERA_OVERRIDE; virtual std::string local_addr() const GALERA_OVERRIDE; virtual std::string remote_addr() const GALERA_OVERRIDE; virtual State state() const GALERA_OVERRIDE { return state_; } virtual SocketId id() const 
GALERA_OVERRIDE { return &socket_; } virtual SocketStats stats() const GALERA_OVERRIDE; private: // AsioSocketHandler interface virtual void connect_handler(gu::AsioSocket&, const gu::AsioErrorCode&) GALERA_OVERRIDE; virtual void write_handler(gu::AsioSocket&, const gu::AsioErrorCode&, size_t) GALERA_OVERRIDE; virtual size_t read_completion_condition(gu::AsioSocket&, const gu::AsioErrorCode&, size_t) GALERA_OVERRIDE; virtual void read_handler(gu::AsioSocket&, const gu::AsioErrorCode&, size_t) GALERA_OVERRIDE; // friend class gcomm::AsioTcpAcceptor; friend class gcomm::AsioPostForSendHandler; AsioTcpSocket(const AsioTcpSocket&); void operator=(const AsioTcpSocket&); void set_buf_sizes(); void init_tstamps() { gu::datetime::Date now(gu::datetime::Date::monotonic()); last_queued_tstamp_ = last_delivered_tstamp_ = now; } void cancel_deferred_close_timer(); void become_closed(); AsioProtonet& net_; std::shared_ptr socket_; // Limit the number of queued bytes. This workaround to avoid queue // pile up due to frequent retransmissions by the upper layers (evs). // It is a responsibility of upper layers (evs) to request resending // of dropped messaes. Upper limit (32MB) is enough to hold 1024 // datagrams with default gcomm MTU 32kB. 
static const size_t max_send_q_bytes = (1 << 25); gcomm::FairSendQueue send_q_; gu::datetime::Date last_queued_tstamp_; std::vector recv_buf_; size_t recv_offset_; gu::datetime::Date last_delivered_tstamp_; State state_; class DeferredCloseTimer; std::weak_ptr deferred_close_timer_; }; class gcomm::AsioTcpAcceptor : public gcomm::Acceptor , public gu::AsioAcceptorHandler , public std::enable_shared_from_this { public: AsioTcpAcceptor(AsioProtonet& net, const gu::URI& uri); ~AsioTcpAcceptor(); void set_buf_sizes(); void listen(const gu::URI& uri); std::string listen_addr() const; void close(); SocketPtr accept(); State state() const { gu_throw_fatal << "TODO:"; } SocketId id() const { return &acceptor_; } private: void accept_handler( gu::AsioAcceptor&, const std::shared_ptr&, const gu::AsioErrorCode& error); AsioProtonet& net_; std::shared_ptr acceptor_; std::shared_ptr next_socket_; }; #include "gu_enable_non_virtual_dtor.hpp" #endif // GCOMM_ASIO_TCP_HPP galera-4-26.4.25/gcomm/src/view.cpp000644 000164 177776 00000022142 15107057155 020064 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2018 Codership Oy */ #include "common/common.h" #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/util.hpp" #include "gu_logger.hpp" #include "gu_exception.hpp" #include #include size_t gcomm::ViewId::unserialize(const gu::byte_t* buf, const size_t buflen, const size_t offset) { size_t off; gu_trace (off = uuid_.unserialize(buf, buflen, offset)); uint32_t w; gu_trace (off = gu::unserialize4(buf, buflen, off, w)); seq_ = w & 0x3fffffff; type_ = static_cast(w >> 30); return off; } size_t gcomm::ViewId::serialize(gu::byte_t* buf, const size_t buflen, const size_t offset) const { size_t off; gcomm_assert(type_ != V_NONE); gu_trace (off = uuid_.serialize(buf, buflen, offset)); uint32_t w((seq_ & 0x3fffffff) | (type_ << 30)); gu_trace (off = gu::serialize4(w, buf, buflen, off)); return off; } static std::string to_string(const gcomm::ViewType type) { 
switch (type) { case gcomm::V_TRANS: return "TRANS"; case gcomm::V_REG: return "REG"; case gcomm::V_NON_PRIM: return "NON_PRIM"; case gcomm::V_PRIM: return "PRIM"; default: return "UNKNOWN"; // gcomm_throw_fatal << "Invalid type value"; } } std::ostream& gcomm::operator<<(std::ostream& os, const gcomm::ViewId& vi) { return (os << "view_id(" << ::to_string(vi.type()) << "," << vi.uuid() << "," << vi.seq()) << ")"; } void gcomm::View::add_member(const UUID& pid, SegmentId segment) { gu_trace((void)members_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_members(NodeList::const_iterator begin, NodeList::const_iterator end) { for (NodeList::const_iterator i = begin; i != end; ++i) { gu_trace((void)members_.insert_unique( std::make_pair(NodeList::key(i), NodeList::value(i)))); } } void gcomm::View::add_joined(const UUID& pid, SegmentId segment) { gu_trace((void)joined_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_left(const UUID& pid, SegmentId segment) { gu_trace((void)left_.insert_unique(std::make_pair(pid, Node(segment)))); } void gcomm::View::add_partitioned(const UUID& pid, SegmentId segment) { gu_trace((void)partitioned_.insert_unique(std::make_pair(pid, Node(segment)))); } const gcomm::NodeList& gcomm::View::members() const { return members_; } const gcomm::NodeList& gcomm::View::joined() const { return joined_; } const gcomm::NodeList& gcomm::View::left() const { return left_; } const gcomm::NodeList& gcomm::View::partitioned() const { return partitioned_; } gcomm::ViewType gcomm::View::type() const { return view_id_.type(); } const gcomm::ViewId& gcomm::View::id() const { return view_id_; } const gcomm::UUID& gcomm::View::representative() const { if (members_.empty()) { return UUID::nil(); } else { return NodeList::key(members_.begin()); } } bool gcomm::View::is_empty() const { return (view_id_.uuid() == UUID::nil() && members_.size() == 0); } bool gcomm::operator==(const gcomm::View& a, const 
gcomm::View& b) { return (a.id() == b.id() && a.members() == b.members() && a.joined() == b.joined() && a.left() == b.left() && a.partitioned() == b.partitioned()); } std::ostream& gcomm::operator<<(std::ostream& os, const gcomm::View& view) { os << "view("; if (view.is_empty() == true) { os << "(empty)"; } else { os << view.id(); os << " memb {\n"; os << view.members(); os << "} joined {\n"; os << view.joined(); os << "} left {\n"; os << view.left(); os << "} partitioned {\n"; os << view.partitioned(); os << "}"; } os << ")"; return os; } std::ostream& gcomm::View::write_stream(std::ostream& os) const { os << "#vwbeg" << std::endl; os << "view_id: "; view_id_.write_stream(os) << std::endl; os << "bootstrap: " << bootstrap_ << std::endl; for(NodeList::const_iterator it = members_.begin(); it != members_.end(); ++it) { const UUID& uuid(it -> first); const Node& node(it -> second); os << "member: "; uuid.print(os) << " "; node.write_stream(os) << std::endl; } os << "#vwend" << std::endl; return os; } std::istream& gcomm::View::read_stream(std::istream& is) { std::string line; while(is.good()) { getline(is, line); std::istringstream istr(line); std::string param; istr >> param; if (param == "#vwbeg") continue; else if (param == "#vwend") break; if (param == "view_id:") { view_id_.read_stream(istr); } else if (param == "bootstrap:") { istr >> bootstrap_; } else if (param == "member:") { UUID uuid; Node node(0); uuid.scan(istr); node.read_stream(istr); add_member(uuid, node.segment()); } } return is; } std::ostream& gcomm::ViewState::write_stream(std::ostream& os) const { os << "my_uuid: "; my_uuid_.print(os) << std::endl; view_.write_stream(os); return os; } std::istream& gcomm::ViewState::read_stream(std::istream& is) { std::string param; std::string line; while(is.good()) { getline(is, line); std::istringstream istr(line); istr >> param; if (param == "my_uuid:") { my_uuid_.scan(istr); } else if (param == "#vwbeg") { // read from next line. 
view_.read_stream(is); } } return is; } std::string gcomm::ViewState::get_viewstate_file_name(gu::Config& conf) { std::string dir_name = COMMON_BASE_DIR_DEFAULT; try { // If base_dir is set in the configuration we should use // it instead of current directory default. dir_name = conf.get(COMMON_BASE_DIR_KEY, dir_name); } catch (const gu::NotFound &) { // In case it is not known we do not have to do // anything and use default. } return dir_name + '/' + COMMON_VIEW_STAT_FILE; } void gcomm::ViewState::write_file() const { // write to temporary file first. std::string tmp(file_name_ + ".tmp"); FILE* fout = fopen(tmp.c_str(), "w"); if (fout == NULL) { log_warn << "open file(" << tmp << ") failed(" << strerror(errno) << ")"; return ; } std::ostringstream os; try { write_stream(os); } catch (const std::exception& e) { log_warn << "write ostringstream failed(" << e.what() << ")"; fclose(fout); return ; } std::string content(os.str()); if (fwrite(content.c_str(), content.size(), 1, fout) == 0) { log_warn << "write file(" << tmp << ") failed(" << strerror(errno) << ")"; fclose(fout); return ; } if (fflush(fout) != 0) { log_warn << "fflush file(" << tmp << ") failed(" << strerror(errno) << ")"; fclose(fout); return ; } if (fsync(fileno(fout)) < 0) { log_warn << "fsync file(" << tmp << ") failed(" << strerror(errno) << ")"; fclose(fout); return ; } if (fclose(fout) != 0){ log_warn << "close file(" << tmp << ") failed(" << strerror(errno) << ")"; return ; } // rename atomically. 
if (rename(tmp.c_str(), file_name_.c_str()) != 0) { log_warn << "rename file(" << tmp << ") to file(" << file_name_ << ") failed(" << strerror(errno) << ")"; } } bool gcomm::ViewState::read_file() { if (access(file_name_.c_str(), R_OK) != 0) { int const errn(errno); std::ostringstream msg; msg << "access file(" << file_name_ << ") failed(" << strerror(errn) << ")"; if (ENOENT == errn) { // absence of a file should be only a notice since it is removed // on graceful shutdown, so it is an expected situation log_info << msg.str(); } else { log_warn << msg.str(); } return false; } try { std::ifstream ifs(file_name_.c_str(), std::ifstream::in); read_stream(ifs); ifs.close(); return true; } catch (const std::exception& e) { log_warn << "read file(" << file_name_ << ") failed(" << e.what() << ")"; return false; } } // remove_file is static function, it should remove the view // state file even if there is no ViewState object around. // View state file name is derived in the same way as for // ViewState object. void gcomm::ViewState::remove_file(gu::Config& conf) { std::string file_name = get_viewstate_file_name(conf); (void) unlink(file_name.c_str()); } galera-4-26.4.25/gcomm/src/protostack.cpp000644 000164 177776 00000003600 15107057155 021301 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #include "gcomm/protostack.hpp" #include "socket.hpp" #include "gcomm/util.hpp" void gcomm::Protostack::push_proto(Protolay* p) { Critical crit(*this); protos_.push_front(p); // connect the pushed Protolay that's now on top // with the one that was previously on top, // if we had one, of course. 
if (protos_.size() > 1) { gcomm::connect(protos_[1], p); } } void gcomm::Protostack::pop_proto(Protolay* p) { Critical crit(*this); assert(protos_.front() == p); if (protos_.front() != p) { log_warn << "Protolay " << p << " is not protostack front"; return; } protos_.pop_front(); if (protos_.begin() != protos_.end()) { gcomm::disconnect(*protos_.begin(), p); } } gu::datetime::Date gcomm::Protostack::handle_timers() { gu::datetime::Date ret(gu::datetime::Date::max()); Critical crit(*this); for (std::deque::reverse_iterator i = protos_.rbegin(); i != protos_.rend(); ++i) { gu::datetime::Date t((*i)->handle_timers()); if (t < ret) ret = t; } return ret; } void gcomm::Protostack::dispatch(const void* id, const Datagram& dg, const ProtoUpMeta& um) { Critical crit(*this); if (protos_.empty() == false) { protos_.back()->handle_up(id, dg, um); } } bool gcomm::Protostack::set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb) { bool ret(false); for (std::deque::iterator i(protos_.begin()); i != protos_.end(); ++i) { ret |= (*i)->set_param(key, val, sync_param_cb); } return ret; } galera-4-26.4.25/gcomm/src/gmcast_link.hpp000644 000164 177776 00000004745 15107057155 021423 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #ifndef GCOMM_GMCAST_LINK_HPP #define GCOMM_GMCAST_LINK_HPP #include "gcomm/uuid.hpp" #include #include namespace gcomm { namespace gmcast { class Link; class LinkMapCmp; class LinkMap; std::ostream& operator<<(std::ostream& os, const LinkMap&); } } class gcomm::gmcast::Link { public: Link(const gcomm::UUID& uuid, const std::string& addr, const std::string& mcast_addr) : uuid_ (uuid), addr_ (addr), mcast_addr_(mcast_addr) { } bool operator==(const Link& cmp) const { return (uuid_ == cmp.uuid_ && addr_ == cmp.addr_); } bool operator<(const Link& cmp) const { return (uuid_ < cmp.uuid_ || (uuid_ == cmp.uuid_ && addr_ < cmp.addr_)); } const gcomm::UUID& uuid() const { return uuid_; } const 
std::string& addr() const { return addr_; } const std::string& mcast_addr() const { return mcast_addr_; } private: UUID uuid_; std::string addr_; std::string mcast_addr_; }; class gcomm::gmcast::LinkMap { typedef std::set MType; public: LinkMap() : link_map_() { } typedef MType::iterator iterator; typedef MType::const_iterator const_iterator; typedef MType::value_type value_type; std::pair insert(const Link& i) { return link_map_.insert(i); } iterator begin() { return link_map_.begin(); } const_iterator begin() const { return link_map_.begin(); } iterator end() { return link_map_.end(); } const_iterator end() const { return link_map_.end(); } const_iterator find(const value_type& vt) const { return link_map_.find(vt); } size_t size() const { return link_map_.size(); } static const UUID& key(const_iterator i) { return i->uuid(); } static const Link& value(const_iterator i) { return *i; } static const UUID& key(const value_type& vt) { return vt.uuid(); } static const Link& value(const value_type& vt) { return vt; } bool operator==(const LinkMap& cmp) const { return (link_map_ == cmp.link_map_); } private: MType link_map_; }; inline std::ostream& gcomm::gmcast::operator<<(std::ostream& os, const LinkMap& lm) { for (LinkMap::const_iterator i = lm.begin(); i != lm.end(); ++i) { os << "\n(" << LinkMap::key(i) << "," << LinkMap::value(i).addr() << ")"; } return (os << "\n"); } #endif // GCOMM_GMCAST_LINK_HPP galera-4-26.4.25/gcomm/src/evs_proto.cpp000644 000164 177776 00000521427 15107057155 021144 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy */ #include "evs_proto.hpp" #include "evs_message2.hpp" #include "evs_input_map2.hpp" #include "gcomm/transport.hpp" #include "gcomm/conf.hpp" #include "gcomm/util.hpp" #include "defaults.hpp" #include #include #include #include #include #include #include // std::cerr using namespace std::rel_ops; // Convenience macros for debug and info logging #define evs_log_debug(__mask__) \ if ((debug_mask_ & 
(__mask__)) == 0) { } \ else log_debug << self_string() << ": " #define evs_log_info(__mask__) \ if ((info_mask_ & (__mask__)) == 0) { } \ else log_info << self_string() << ": " gcomm::evs::Proto::Proto(gu::Config& conf, const UUID& my_uuid, SegmentId segment, const gu::URI& uri, const size_t mtu, const View* rst_view) : Protolay(conf), timers_(), version_(check_range(Conf::EvsVersion, param(conf, uri, Conf::EvsVersion, Defaults::EvsVersion), 0, GCOMM_PROTOCOL_MAX_VERSION + 1)), debug_mask_(param(conf, uri, Conf::EvsDebugLogMask, Defaults::EvsDebugLogMask, std::hex)), info_mask_(param(conf, uri, Conf::EvsInfoLogMask, Defaults::EvsInfoLogMask, std::hex)), last_stats_report_(gu::datetime::Date::monotonic()), collect_stats_(true), hs_agreed_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), hs_safe_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), hs_local_causal_("0.0,0.0001,0.00031623,0.001,0.0031623,0.01,0.031623,0.1,0.31623,1.,3.1623,10.,31.623"), safe_deliv_latency_(), send_queue_s_(0), n_send_queue_s_(0), sent_msgs_(Message::num_message_types, 0), retrans_msgs_(0), recovered_msgs_(0), recvd_msgs_(Message::num_message_types, 0), delivered_msgs_(O_LOCAL_CAUSAL + 1), delivering_(false), my_uuid_(my_uuid), segment_(segment), known_(), self_i_(), view_forget_timeout_( check_range(Conf::EvsViewForgetTimeout, param( conf, uri, Conf::EvsViewForgetTimeout, Defaults::EvsViewForgetTimeout), gu::from_string( Defaults::EvsViewForgetTimeoutMin), gu::datetime::Period::max())), inactive_timeout_( check_range(Conf::EvsInactiveTimeout, param( conf, uri, Conf::EvsInactiveTimeout, Defaults::EvsInactiveTimeout), gu::from_string( Defaults::EvsInactiveTimeoutMin), gu::datetime::Period::max())), suspect_timeout_( check_range(Conf::EvsSuspectTimeout, param( conf, uri, Conf::EvsSuspectTimeout, Defaults::EvsSuspectTimeout), gu::from_string( Defaults::EvsSuspectTimeoutMin), gu::datetime::Period::max())), 
inactive_check_period_( check_range(Conf::EvsInactiveCheckPeriod, param( conf, uri, Conf::EvsInactiveCheckPeriod, Defaults::EvsInactiveCheckPeriod), gu::datetime::Period::min(), suspect_timeout_/2 + 1)), retrans_period_( check_range(Conf::EvsKeepalivePeriod, param( conf, uri, Conf::EvsKeepalivePeriod, Defaults::EvsKeepalivePeriod), gu::from_string( Defaults::EvsKeepalivePeriodMin), suspect_timeout_/3 + 1)), install_timeout_( check_range(Conf::EvsInstallTimeout, param( conf, uri, Conf::EvsInstallTimeout, gu::to_string(inactive_timeout_/2)), retrans_period_, inactive_timeout_ + 1)), join_retrans_period_( check_range(Conf::EvsJoinRetransPeriod, param( conf, uri, Conf::EvsJoinRetransPeriod, Defaults::EvsJoinRetransPeriod), gu::from_string( Defaults::EvsJoinRetransPeriodMin), gu::datetime::Period::max())), stats_report_period_( check_range(Conf::EvsStatsReportPeriod, param( conf, uri, Conf::EvsStatsReportPeriod, Defaults::EvsStatsReportPeriod), gu::from_string( Defaults::EvsStatsReportPeriodMin), gu::datetime::Period::max())), causal_keepalive_period_(retrans_period_), delay_margin_(param( conf, uri, Conf::EvsDelayMargin, Defaults::EvsDelayMargin)), delayed_keep_period_(param( conf, uri, Conf::EvsDelayedKeepPeriod, Defaults::EvsDelayedKeepPeriod)), last_inactive_check_ (gu::datetime::Date::monotonic()), last_causal_keepalive_ (gu::datetime::Date::monotonic()), current_view_(0, ViewId(V_TRANS, my_uuid, rst_view ? 
rst_view -> id().seq() + 1 : 0)), previous_view_(), previous_views_(), gather_views_(), input_map_(new InputMap()), causal_queue_(), consensus_(*this, known_, *input_map_, current_view_), last_sent_join_tstamp_(), install_message_(0), max_view_id_seq_(0), attempt_seq_(1), new_view_logged_(false), max_install_timeouts_( check_range(Conf::EvsMaxInstallTimeouts, param(conf, uri, Conf::EvsMaxInstallTimeouts, Defaults::EvsMaxInstallTimeouts), 0, std::numeric_limits::max())), install_timeout_count_(0), fifo_seq_(-1), last_sent_(-1), send_window_( check_range(Conf::EvsSendWindow, param(conf, uri, Conf::EvsSendWindow, Defaults::EvsSendWindow), gu::from_string(Defaults::EvsSendWindowMin), std::numeric_limits::max())), user_send_window_( check_range(Conf::EvsUserSendWindow, param(conf, uri, Conf::EvsUserSendWindow, Defaults::EvsUserSendWindow), gu::from_string(Defaults::EvsUserSendWindowMin), send_window_ + 1)), bytes_since_request_user_msg_feedback_(), output_(), send_buf_(), max_output_size_(128), mtu_(mtu), use_aggregate_(param(conf, uri, Conf::EvsUseAggregate, Defaults::EvsUseAggregate)), self_loopback_(false), state_(S_CLOSED), shift_to_rfcnt_(0), pending_leave_(false), isolation_end_(gu::datetime::Date::zero()), delayed_list_(), auto_evict_(param(conf, uri, Conf::EvsAutoEvict, Defaults::EvsAutoEvict)) { log_info << "EVS version " << version_; conf.set(Conf::EvsVersion, gu::to_string(version_)); conf.set(Conf::EvsViewForgetTimeout, gu::to_string(view_forget_timeout_)); conf.set(Conf::EvsSuspectTimeout, gu::to_string(suspect_timeout_)); conf.set(Conf::EvsInactiveTimeout, gu::to_string(inactive_timeout_)); conf.set(Conf::EvsKeepalivePeriod, gu::to_string(retrans_period_)); conf.set(Conf::EvsInactiveCheckPeriod, gu::to_string(inactive_check_period_)); conf.set(Conf::EvsJoinRetransPeriod, gu::to_string(join_retrans_period_)); conf.set(Conf::EvsInstallTimeout, gu::to_string(install_timeout_)); conf.set(Conf::EvsStatsReportPeriod, gu::to_string(stats_report_period_)); 
conf.set(Conf::EvsCausalKeepalivePeriod, gu::to_string(causal_keepalive_period_)); conf.set(Conf::EvsSendWindow, gu::to_string(send_window_)); conf.set(Conf::EvsUserSendWindow, gu::to_string(user_send_window_)); conf.set(Conf::EvsUseAggregate, gu::to_string(use_aggregate_)); conf.set(Conf::EvsDebugLogMask, gu::to_string(debug_mask_, std::hex)); conf.set(Conf::EvsInfoLogMask, gu::to_string(info_mask_, std::hex)); conf.set(Conf::EvsMaxInstallTimeouts, gu::to_string(max_install_timeouts_)); conf.set(Conf::EvsDelayMargin, gu::to_string(delay_margin_)); conf.set(Conf::EvsDelayedKeepPeriod, gu::to_string(delayed_keep_period_)); conf.set(Conf::EvsAutoEvict, gu::to_string(auto_evict_)); // known_.insert_unique( std::make_pair(my_uuid_, Node(*this))); self_i_ = known_.begin(); assert(NodeMap::value(self_i_).operational() == true); NodeMap::value(self_i_).set_index(0); input_map_->reset(1); current_view_.add_member(my_uuid_, segment_); // we don't need to store previous views, do we ? if (rst_view) { previous_view_ = *rst_view; previous_views_.insert( std::make_pair(rst_view -> id(), gu::datetime::Date::monotonic())); } if (mtu_ != std::numeric_limits::max()) { send_buf_.reserve(mtu_); } } gcomm::evs::Proto::~Proto() { output_.clear(); delete install_message_; delete input_map_; } bool gcomm::evs::Proto::set_param(const std::string& key, const std::string& val, Protolay::sync_param_cb_t& sync_param_cb) { if (key == gcomm::Conf::EvsVersion) { version_ = check_range(Conf::EvsVersion, gu::from_string(val), 0, GCOMM_PROTOCOL_MAX_VERSION + 1); conf_.set(Conf::EvsVersion, gu::to_string(version_)); // trigger configuration change to propagate version shift_to(S_GATHER, true); return true; } else if (key == gcomm::Conf::EvsSendWindow) { send_window_ = check_range(Conf::EvsSendWindow, gu::from_string(val), user_send_window_, std::numeric_limits::max()); conf_.set(Conf::EvsSendWindow, gu::to_string(send_window_)); return true; } else if (key == gcomm::Conf::EvsUserSendWindow) { 
user_send_window_ = check_range( Conf::EvsUserSendWindow, gu::from_string(val), gu::from_string(Defaults::EvsUserSendWindowMin), send_window_ + 1); conf_.set(Conf::EvsUserSendWindow, gu::to_string(user_send_window_)); return true; } else if (key == gcomm::Conf::EvsMaxInstallTimeouts) { max_install_timeouts_ = check_range( Conf::EvsMaxInstallTimeouts, gu::from_string(val), 0, std::numeric_limits::max()); conf_.set(Conf::EvsMaxInstallTimeouts, gu::to_string(max_install_timeouts_)); return true; } else if (key == Conf::EvsStatsReportPeriod) { stats_report_period_ = check_range( Conf::EvsStatsReportPeriod, gu::from_string(val), gu::from_string(Defaults::EvsStatsReportPeriodMin), gu::datetime::Period::max()); conf_.set(Conf::EvsStatsReportPeriod, gu::to_string(stats_report_period_)); reset_timer(T_STATS); return true; } else if (key == Conf::EvsInfoLogMask) { info_mask_ = gu::from_string(val, std::hex); conf_.set(Conf::EvsInfoLogMask, gu::to_string(info_mask_, std::hex)); return true; } else if (key == Conf::EvsDebugLogMask) { debug_mask_ = gu::from_string(val, std::hex); conf_.set(Conf::EvsDebugLogMask, gu::to_string(debug_mask_, std::hex)); return true; } else if (key == Conf::EvsSuspectTimeout) { suspect_timeout_ = check_range( Conf::EvsSuspectTimeout, gu::from_string(val), gu::from_string(Defaults::EvsSuspectTimeoutMin), gu::datetime::Period::max()); conf_.set(Conf::EvsSuspectTimeout, gu::to_string(suspect_timeout_)); reset_timer(T_INACTIVITY); return true; } else if (key == Conf::EvsInactiveTimeout) { inactive_timeout_ = check_range( Conf::EvsInactiveTimeout, gu::from_string(val), gu::from_string(Defaults::EvsInactiveTimeoutMin), gu::datetime::Period::max()); conf_.set(Conf::EvsInactiveTimeout, gu::to_string(inactive_timeout_)); reset_timer(T_INACTIVITY); return true; } else if (key == Conf::EvsKeepalivePeriod) { retrans_period_ = check_range( Conf::EvsKeepalivePeriod, gu::from_string(val), gu::from_string(Defaults::EvsKeepalivePeriodMin), 
gu::datetime::Period::max()); conf_.set(Conf::EvsKeepalivePeriod, gu::to_string(retrans_period_)); reset_timer(T_RETRANS); return true; } else if (key == Conf::EvsCausalKeepalivePeriod) { causal_keepalive_period_ = check_range( Conf::EvsCausalKeepalivePeriod, gu::from_string(val), gu::datetime::Period(0), gu::datetime::Period::max()); conf_.set(Conf::EvsCausalKeepalivePeriod, gu::to_string(causal_keepalive_period_)); // no timer reset here, causal keepalives don't rely on timer return true; } else if (key == Conf::EvsJoinRetransPeriod) { join_retrans_period_ = check_range( Conf::EvsJoinRetransPeriod, gu::from_string(val), gu::from_string(Defaults::EvsJoinRetransPeriodMin), gu::datetime::Period::max()); conf_.set(Conf::EvsJoinRetransPeriod, gu::to_string(join_retrans_period_)); reset_timer(T_RETRANS); return true; } else if (key == Conf::EvsInstallTimeout) { install_timeout_ = check_range( Conf::EvsInstallTimeout, gu::from_string(val), retrans_period_*2, inactive_timeout_ + 1); conf_.set(Conf::EvsInstallTimeout, gu::to_string(install_timeout_)); reset_timer(T_INSTALL); return true; } else if (key == Conf::EvsUseAggregate) { use_aggregate_ = gu::from_string(val); conf_.set(Conf::EvsUseAggregate, gu::to_string(use_aggregate_)); return true; } else if (key == Conf::EvsDelayMargin) { delay_margin_ = gu::from_string(val); conf_.set(Conf::EvsDelayMargin, gu::to_string(delay_margin_)); return true; } else if (key == Conf::EvsDelayedKeepPeriod) { delayed_keep_period_ = gu::from_string(val); conf_.set(Conf::EvsDelayedKeepPeriod, gu::to_string(delayed_keep_period_)); return true; } else if (key == Conf::EvsEvict) { if (val.size()) { UUID uuid; std::istringstream is(val); is >> uuid; log_info << "Evicting node " << uuid << " permanently from cluster"; evict(uuid); if (state() == S_OPERATIONAL && current_view_.is_member(uuid) == true) { shift_to(S_GATHER, true); } } else { Protolay::EvictList::const_iterator i, i_next; for (i = evict_list().begin(); i != evict_list().end(); i = 
i_next) { i_next = i, ++i_next; log_info << "unevicting " << Protolay::EvictList::key(i); unevict(Protolay::EvictList::key(i)); } } return true; } else if (key == Conf::EvsAutoEvict) { auto_evict_ = gu::from_string(val); conf_.set(Conf::EvsAutoEvict, gu::to_string(auto_evict_)); return true; } else if (key == Conf::EvsViewForgetTimeout || key == Conf::EvsInactiveCheckPeriod) { gu_throw_error(EPERM) << "can't change value for '" << key << "' during runtime"; } return false; } void gcomm::evs::Proto::handle_get_status(gu::Status& status) const { status.insert("evs_state", to_string(state_)); status.insert("evs_repl_latency", safe_deliv_latency_.to_string()); std::string delayed_list_str; for (DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { if (is_evicted(i->first) == false || current_view_.is_member(i->first) == true) { delayed_list_str += i->first.full_str() + ":" + i->second.addr() + ":" + gu::to_string(i->second.state_change_cnt()); delayed_list_str += ","; } } // Strip trailing comma if (delayed_list_str.empty() == false) { delayed_list_str.resize(delayed_list_str.size() - 1); } status.insert("evs_delayed", delayed_list_str); std::string evict_list_str; for (Protolay::EvictList::const_iterator i(evict_list().begin()); i != evict_list().end(); ) { evict_list_str += EvictList::key(i).full_str(); if (++i != evict_list().end()) evict_list_str += ","; } status.insert("evs_evict_list", evict_list_str); if (info_mask_ & I_STATISTICS) { status.insert("evs_safe_hs", hs_safe_.to_string()); status.insert("evs_causal_hs", hs_local_causal_.to_string()); status.insert("evs_outq_avg", gu::to_string(std::fabs(double(send_queue_s_)/ double(n_send_queue_s_)))); status.insert("evs_sent_user", gu::to_string(sent_msgs_[Message::EVS_T_USER])); status.insert("evs_sent_delegate", gu::to_string(sent_msgs_[Message::EVS_T_DELEGATE])); status.insert("evs_sent_gap", gu::to_string(sent_msgs_[Message::EVS_T_GAP])); status.insert("evs_sent_join", 
gu::to_string(sent_msgs_[Message::EVS_T_JOIN])); status.insert("evs_sent_install", gu::to_string(sent_msgs_[Message::EVS_T_INSTALL])); status.insert("evs_sent_leave", gu::to_string(sent_msgs_[Message::EVS_T_LEAVE])); status.insert("evs_retransmitted", gu::to_string(retrans_msgs_)); status.insert("evs_recovered", gu::to_string(recovered_msgs_)); status.insert("evs_deliv_safe", gu::to_string(delivered_msgs_[O_SAFE])); } } std::ostream& gcomm::evs::operator<<(std::ostream& os, const Proto& p) { os << "evs::proto(" << p.self_string() << ", " << p.to_string(p.state()) << ") {\n"; os << "current_view=" << p.current_view_ << ",\n"; os << "input_map=" << *p.input_map_ << ",\n"; os << "fifo_seq=" << p.fifo_seq_ << ",\n"; os << "last_sent=" << p.last_sent_ << ",\n"; os << "known:\n"; for (NodeMap::const_iterator i(p.known_.begin()); i != p.known_.end(); ++i) { os << NodeMap::key(i) << " at " << p.get_address(NodeMap::key(i)) << "\n"; os << NodeMap::value(i) << "\n"; } if (p.install_message_ != 0) os << "install msg=" << *p.install_message_ << "\n"; os << " }"; return os; } std::string gcomm::evs::Proto::stats() const { std::ostringstream os; os << "\n\tnodes " << current_view_.members().size(); os << "\n\tagreed deliv hist {" << hs_agreed_ << "} "; os << "\n\tsafe deliv hist {" << hs_safe_ << "} "; os << "\n\tcaus deliv hist {" << hs_local_causal_ << "} "; os << "\n\toutq avg " << double(send_queue_s_)/double(n_send_queue_s_); os << "\n\tsent {"; std::copy(sent_msgs_.begin(), sent_msgs_.end(), std::ostream_iterator(os, ",")); os << "}\n\tsent per sec {"; const double norm(double(gu::datetime::Date::monotonic().get_utc() - last_stats_report_.get_utc())/gu::datetime::Sec); std::vector result(7, norm); std::transform(sent_msgs_.begin(), sent_msgs_.end(), result.begin(), result.begin(), std::divides()); std::copy(result.begin(), result.end(), std::ostream_iterator(os, ",")); os << "}\n\trecvd { "; std::copy(recvd_msgs_.begin(), recvd_msgs_.end(), std::ostream_iterator(os, ",")); 
os << "}\n\trecvd per sec {"; std::fill(result.begin(), result.end(), norm); std::transform(recvd_msgs_.begin(), recvd_msgs_.end(), result.begin(), result.begin(), std::divides()); std::copy(result.begin(), result.end(), std::ostream_iterator(os, ",")); os << "}\n\tretransmitted " << retrans_msgs_ << " "; os << "\n\trecovered " << recovered_msgs_; os << "\n\tdelivered {"; std::copy(delivered_msgs_.begin(), delivered_msgs_.end(), std::ostream_iterator(os, ", ")); os << "}\n\teff(delivered/sent) " << double(accumulate(delivered_msgs_.begin() + 1, delivered_msgs_.begin() + O_SAFE + 1, 0)) /double(accumulate(sent_msgs_.begin(), sent_msgs_.end(), 0)); return os.str(); } void gcomm::evs::Proto::reset_stats() { hs_agreed_.clear(); hs_safe_.clear(); hs_local_causal_.clear(); safe_deliv_latency_.clear(); send_queue_s_ = 0; n_send_queue_s_ = 0; last_stats_report_ = gu::datetime::Date::monotonic(); } bool gcomm::evs::Proto::is_msg_from_previous_view(const Message& msg) { ViewList::const_iterator i; if ((i = previous_views_.find(msg.source_view_id())) != previous_views_.end()) { evs_log_debug(D_FOREIGN_MSGS) << " message " << msg << " from previous view " << i->first; return true; } // If node is in current view, check message source view seq, if it is // smaller than current view seq then the message is also from some // previous (but unknown to us) view NodeList::const_iterator ni(current_view_.members().find(msg.source())); if (ni != current_view_.members().end()) { if (msg.source_view_id().seq() < current_view_.id().seq()) { log_warn << "stale message from unknown origin " << msg; return true; } } return false; } void gcomm::evs::Proto::handle_inactivity_timer() { gu_trace(check_inactive()); gu_trace(cleanup_views()); gu_trace(cleanup_evicted()); } void gcomm::evs::Proto::handle_retrans_timer() { evs_log_debug(D_TIMERS) << "retrans timer"; if (state() == S_GATHER || state() == S_JOINING) { if (install_message_ != 0) { // Retransmit install message if representative and all 
// --- tail of gcomm::evs::Proto::handle_retrans_timer() (S_GATHER branch),
// continued from the previous chunk; the original comment resumes: "... commit
// gaps have not been received yet."
        if (is_all_committed() == false &&
            install_message_->source() == uuid())
        {
            evs_log_debug(D_INSTALL_MSGS) << "retrans install";
            gu::Buffer buf;
            install_message_->set_flags(
                install_message_->flags() | Message::F_RETRANS);
            (void)serialize(*install_message_, buf);
            Datagram dg(buf);
            // Must not be sent as delegate, newly joining node
            // will filter them out in handle_msg().
            gu_trace(send_down(dg, ProtoDownMeta()));
        }
        evs_log_debug(D_GAP_MSGS) << "resend commit gap";
        // Resend commit gap
        gu_trace(send_gap(EVS_CALLER, UUID::nil(),
                          install_message_->install_view_id(),
                          Range(), true));
    }
    else
    {
        evs_log_debug(D_JOIN_MSGS) << "retrans join";
        gu_trace(send_join(true));
    }
}
else if (state() == S_INSTALL)
{
    gcomm_assert(install_message_ != 0);
    // Resend both the commit gap and the regular gap for the pending
    // install view.
    gu_trace(send_gap(EVS_CALLER, UUID::nil(),
                      install_message_->install_view_id(),
                      Range(), true));
    gu_trace(send_gap(EVS_CALLER, UUID::nil(),
                      install_message_->install_view_id(),
                      Range()));
}
else if (state() == S_OPERATIONAL)
{
    // Send an empty dropped user message as a keepalive. If last_sent_
    // did not advance, sending was blocked (e.g. by flow control).
    const seqno_t prev_last_sent(last_sent_);
    evs_log_debug(D_TIMERS) << "sending keepalive, last_sent=" << last_sent_;
    Datagram dg;
    gu_trace((void)send_user(dg, 0xff, O_DROP, -1, -1));
    if (prev_last_sent == last_sent_)
    {
        log_warn << "could not send keepalive";
    }
}
else if (state() == S_LEAVING)
{
    evs_log_debug(D_TIMERS) << "send leave timer";
    send_leave(false);
    retrans_missing();
}
}

// Put this node into isolation until the given period has elapsed.
// The isolation end is checked and cleared in check_inactive().
void gcomm::evs::Proto::isolate(gu::datetime::Period period)
{
    isolation_end_ = gu::datetime::Date::monotonic() + period;
}

// Called when the install timer expires in S_GATHER/S_INSTALL, i.e. the
// group failed to agree on a new view in time. Escalates in stages driven
// by install_timeout_count_: first declare only inconsistent nodes inactive,
// then all other nodes (and isolate this node), and finally abort the
// process if even a singleton view could not be formed.
void gcomm::evs::Proto::handle_install_timer()
{
    gcomm_assert(state() == S_GATHER || state() == S_INSTALL);
    log_info << self_string() << " install timer expired";

    bool is_cons(consensus_.is_consensus());
    bool is_repr(is_representative(uuid()));
    evs_log_info(I_STATE) << "before inspection:";
    evs_log_info(I_STATE) << "consensus: " << is_cons;
    evs_log_info(I_STATE) << "repr : " << is_repr;
    evs_log_info(I_STATE) << "state dump for diagnosis:";
    std::cerr << *this << std::endl;

    if (install_timeout_count_ < max_install_timeouts_ )
    {
        // before reaching max_install_timeouts, declare only inconsistent
        // nodes as inactive
        for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
        {
            const UUID& node_uuid(NodeMap::key(i));
            const Node& node(NodeMap::value(i));
            if (node_uuid != uuid() &&
                (node.join_message() == 0 ||
                 consensus_.is_consistent(*node.join_message()) == false))
            {
                evs_log_info(I_STATE)
                    << " setting source " << NodeMap::key(i)
                    << " as inactive due to expired install timer";
                set_inactive(NodeMap::key(i));
            }
        }
    }
    else if (install_timeout_count_ == max_install_timeouts_)
    {
        // max install timeouts reached, declare all other nodes
        // as inactive
        for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
        {
            if (NodeMap::key(i) != uuid())
            {
                evs_log_info(I_STATE)
                    << " setting source " << NodeMap::key(i)
                    << " as inactive due to expired install timer";
                set_inactive(NodeMap::key(i));
            }
        }
        log_info << "max install timeouts reached, will isolate node "
                 << "for " << suspect_timeout_ + inactive_timeout_;
        isolate(suspect_timeout_ + inactive_timeout_);
    }
    else if (install_timeout_count_ > max_install_timeouts_)
    {
        log_info << "going to give up, state dump for diagnosis:";
        std::cerr << *this << std::endl;
        gu_throw_fatal << self_string()
                       << " failed to form singleton view after exceeding "
                       << "max_install_timeouts " << max_install_timeouts_
                       << ", giving up";
    }

    if (install_message_ != 0)
    {
        // Nodes which have not committed to the install message are
        // declared inactive.
        for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
        {
            if (NodeMap::value(i).committed() == false)
            {
                log_info << self_string() << " node " << NodeMap::key(i)
                         << " failed to commit for install message, "
                         << "declaring inactive";
                if (NodeMap::key(i) != uuid())
                {
                    set_inactive(NodeMap::key(i));
                }
            }
        }
    }
    else
    {
        log_info << "no install message received";
    }

    shift_to(S_GATHER, true);
    is_cons = consensus_.is_consensus();
    is_repr = is_representative(uuid());
    evs_log_info(I_STATE) << "after inspection:";
    evs_log_info(I_STATE) << "consensus: " << is_cons;
    evs_log_info(I_STATE) << "repr : " << is_repr;
    // If this node became the representative and consensus was reached
    // after pruning, send the install message right away.
    if (is_cons == true && is_repr == true)
    {
        send_install(EVS_CALLER);
    }
    install_timeout_count_++;
}

// Periodic statistics timer: reset collected counters.
void gcomm::evs::Proto::handle_stats_timer()
{
    reset_stats();
}

// Predicate for selecting timer list entries of a given timer type.
class TimerSelectOp
{
public:
    TimerSelectOp(const gcomm::evs::Proto::Timer t_) : t(t_) { }
    bool operator()(const gcomm::evs::Proto::TimerList::value_type& vt) const
    {
        return (gcomm::evs::Proto::TimerList::value(vt) == t);
    }
private:
    gcomm::evs::Proto::Timer const t;
};

// Compute the next expiration time for timer t, relative to the current
// monotonic time and depending on the current protocol state.
gu::datetime::Date gcomm::evs::Proto::next_expiration(const Timer t) const
{
    gcomm_assert(state() != S_CLOSED);
    gu::datetime::Date now(gu::datetime::Date::monotonic());
    switch (t)
    {
    case T_INACTIVITY:
        return (now + inactive_check_period_);
    case T_RETRANS:
        switch (state())
        {
        case S_OPERATIONAL:
        case S_LEAVING:
            return (now + retrans_period_);
        case S_JOINING:
        case S_GATHER:
        case S_INSTALL:
            return (now + join_retrans_period_);
        default:
            gu_throw_fatal;
        }
    case T_INSTALL:
        switch (state())
        {
        case S_GATHER:
        case S_INSTALL:
            return (now + install_timeout_);
        default:
            // Install timer is armed only during S_GATHER/S_INSTALL.
            return gu::datetime::Date::max();
        }
    case T_STATS:
        return (now + stats_report_period_);
    }
    gu_throw_fatal; // unreachable for valid Timer values
}

// Erase all entries of the given timer type from timer_list.
void timer_list_erase_by_type(gcomm::evs::Proto::TimerList& timer_list,
                              gcomm::evs::Proto::Timer timer)
{
    gcomm::evs::Proto::TimerList::iterator i, i_next;
    for (i = timer_list.begin(); i != timer_list.end(); i = i_next)
    {
        i_next = i, ++i_next;
        if (gcomm::evs::Proto::TimerList::value(i) == timer)
        {
            timer_list.erase(i);
        }
    }
}

// (Re)arm timer t: drop any existing entries of that type and insert a
// fresh entry at its next expiration time.
void gcomm::evs::Proto::reset_timer(Timer t)
{
    timer_list_erase_by_type(timers_, t);
    timers_.insert(std::make_pair(next_expiration(t), t));
}

// Disarm timer t.
void gcomm::evs::Proto::cancel_timer(Timer t)
{
    timer_list_erase_by_type(timers_, t);
}

// Fire all expired timers and return the time of the next expiration,
// or Date::max() if no timers remain or the protocol was closed while
// handling a timer.
gu::datetime::Date gcomm::evs::Proto::handle_timers()
{
    gu::datetime::Date now(gu::datetime::Date::monotonic());
    while (timers_.empty() == false &&
           TimerList::key(timers_.begin()) <= now)
    {
        Timer t(TimerList::value(timers_.begin()));
        timers_.erase(timers_.begin());
        switch (t)
        {
        case T_INACTIVITY: handle_inactivity_timer(); break;
        case T_RETRANS:    handle_retrans_timer();    break;
        case T_INSTALL:    handle_install_timer();    break;
        case T_STATS:      handle_stats_timer();      break;
        }
        if (state() == S_CLOSED)
        {
            return gu::datetime::Date::max();
        }
        reset_timer(t);
    }
    if (timers_.empty() == true)
    {
        evs_log_debug(D_TIMERS) << "no timers set";
        return gu::datetime::Date::max();
    }
    return TimerList::key(timers_.begin());
}

// Periodic inactivity inspection: detect inactive/suspected/delayed peers,
// request message recovery from delayed nodes, maintain the delayed list
// used for auto eviction, and trigger state shifts when members have gone
// silent.
void gcomm::evs::Proto::check_inactive()
{
    const gu::datetime::Date now(gu::datetime::Date::monotonic());

    // If this check itself was delayed far beyond its period (e.g. the
    // process was suspended), the collected timestamps are unreliable;
    // skip this round.
    if (last_inactive_check_ + inactive_check_period_*3 < now)
    {
        log_warn << "last inactive check more than "
                 << inactive_check_period_*3
                 << " ago (" << (now - last_inactive_check_)
                 << "), skipping check";
        last_inactive_check_ = now;
        return;
    }

    NodeMap::value(self_i_).set_tstamp(gu::datetime::Date::monotonic());
    std::for_each(known_.begin(), known_.end(), InspectNode());

    bool has_inactive(false);
    size_t n_suspected(0);
    bool do_send_delayed_list(false);

    // Iterate over known nodes and check inactive/suspected/delayed status
    for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i)
    {
        if (i == self_i_) continue; // No need to check self
        const UUID& node_uuid(NodeMap::key(i));
        Node& node(NodeMap::value(i));
        if (node_uuid != uuid() &&
            (node.is_inactive()  == true ||
             node.is_suspected() == true ))
        {
            if (node.operational() == true && node.is_inactive() == true)
            {
                log_info << self_string()
                         << " detected inactive node: " << node_uuid;
            }
            else if (node.is_suspected() == true &&
                     node.is_inactive() == false)
            {
                log_info << self_string()
                         << " suspecting node: " << node_uuid;
            }
            if (node.is_inactive() == true)
            {
                set_inactive(node_uuid);
            }
            if (node.is_suspected() == true && node.operational() == true)
            {
                ++n_suspected;
                if (node.join_message() == 0)
                {
                    log_info << self_string()
                             << " suspected node without join message, declaring inactive";
                    set_inactive(node_uuid);
                }
            }
            has_inactive = true;
        }

        // Delayed-node tracking for auto eviction.
        DelayedList::iterator dli(delayed_list_.find(node_uuid));
        if (auto_evict_ &&
            node.seen_tstamp() + retrans_period_ + delay_margin_ <= now)
        {
            if (node.index() != Node::invalid_index)
            {
                // Delayed node in group, check input map state and request
                // message recovery if necessary
                Range range(input_map_->range(node.index()));
                log_info << "delayed node: " << node_uuid
                         << ", requesting range "
                         << Range(range.lu(), last_sent_);
                if (last_sent_ >= range.lu())
                {
                    // Request missing message range from delayed node.
                    request_retrans(node_uuid, node_uuid,
                                    Range(range.lu(), last_sent_));
                }
            }
            if (dli == delayed_list_.end())
            {
                delayed_list_.insert(
                    std::make_pair(node_uuid,
                                   DelayedEntry(get_address(node_uuid))));
            }
            else
            {
                dli->second.set_tstamp(now);
                dli->second.set_state(DelayedEntry::S_DELAYED,
                                      delayed_keep_period_, now);
                evs_log_debug(D_STATE)
                    << "set '" << dli->first
                    << "' delayed state to S_DELAYED , cnt = "
                    << dli->second.state_change_cnt();
                // todo(dirlt): make threshold as a configurable variable ?
                if (dli->second.state_change_cnt() > 0)
                {
                    do_send_delayed_list = true;
                }
            }
        }
        else if (dli != delayed_list_.end())
        {
            // Node responded again: decay its delayed state back to S_OK.
            const size_t prev_cnt(dli->second.state_change_cnt());
            dli->second.set_state(DelayedEntry::S_OK,
                                  delayed_keep_period_, now);
            if (prev_cnt != dli->second.state_change_cnt())
            {
                dli->second.set_tstamp(now);
            }
            evs_log_debug(D_STATE)
                << "set '" << dli->first
                << "' delayed state to S_OK. prev_cnt = " << prev_cnt
                << ", cur_cnt = " << dli->second.state_change_cnt();
            if (dli->second.state_change_cnt() > 0)
            {
                do_send_delayed_list = true;
            }
        }
    }

    // Clean up delayed list and evict list messages
    {
        DelayedList::iterator i, i_next;
        for (i = delayed_list_.begin(); i != delayed_list_.end(); i = i_next)
        {
            i_next = i, ++i_next;
            // State change count has decayed back to zero
            // or node is already evicted and not in the current view
            // anymore.
            if ((i->second.state_change_cnt() == 0 &&
                 i->second.state() == DelayedEntry::S_OK) ||
                (is_evicted(i->first) == true &&
                 current_view_.is_member(i->first) == false))
            {
                log_debug << "remove '" << i->first << "' from delayed_list";
                delayed_list_.erase(i);
            }
        }
        for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i)
        {
            Node& node(NodeMap::value(i));
            const DelayedListMessage* const elm(node.delayed_list_message());
            if (elm != 0 && elm->tstamp() + delayed_keep_period_ < now)
            {
                log_debug << "discarding expired elm from " << elm->source();
                node.set_delayed_list_message(0);
            }
        }
    }

    if (current_view_.version() > 0 &&
        do_send_delayed_list == true && auto_evict_ > 0)
    {
        send_delayed_list();
    }

    // All other nodes are under suspicion, set all others as inactive.
    // This will speed up recovery when this node has been isolated from
    // other group. Note that this should be done only if known size is
    // greater than 2 in order to avoid immediate split brain.
    if (known_.size() > 2 && n_suspected + 1 == known_.size())
    {
        for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
        {
            if (NodeMap::key(i) != uuid())
            {
                evs_log_info(I_STATE)
                    << " setting source " << NodeMap::key(i)
                    << " inactive (other nodes under suspicion)";
                set_inactive(NodeMap::key(i));
            }
        }
    }

    if (has_inactive == true && state() == S_OPERATIONAL)
    {
        gu_trace(shift_to(S_GATHER, true));
    }
    else if (has_inactive == true &&
             state() == S_LEAVING && n_operational() == 1)
    {
        gu_trace(shift_to(S_CLOSED));
    }

    last_inactive_check_ = now;

    // Check if isolation period has ended
    if (isolation_end_ != gu::datetime::Date::zero() &&
        isolation_end_ <= now)
    {
        log_info << "ending isolation";
        isolation_end_ = gu::datetime::Date::zero();
    }
}

// Mark node_uuid as inactive: clear its timestamp and join message and
// flag it non-operational. Must never be called for this node itself.
void gcomm::evs::Proto::set_inactive(const UUID& node_uuid)
{
    NodeMap::iterator i;
    gcomm_assert(node_uuid != uuid());
    gu_trace(i = known_.find_checked(node_uuid));
    evs_log_debug(D_STATE) << "setting " << node_uuid << " inactive";
    Node& node(NodeMap::value(i));
    node.set_tstamp(gu::datetime::Date::zero());
    node.set_join_message(0);
    // node.set_leave_message(0);
    node.set_operational(false);
}

// Return true if the given node is currently marked non-operational.
bool gcomm::evs::Proto::is_inactive(const UUID& uuid) const
{
    NodeMap::const_iterator i;
    gu_trace(i = known_.find_checked(uuid));
    const Node& node(NodeMap::value(i));
    return (node.operational() == false);
}

// Remove from known_ every node which is not listed as operational in the
// install message im.
void gcomm::evs::Proto::cleanup_foreign(const InstallMessage& im)
{
    NodeMap::iterator i, i_next;
    for (i = known_.begin(); i != known_.end(); i = i_next)
    {
        const UUID& uuid(NodeMap::key(i));
        i_next = i, ++i_next;
        const MessageNodeList::const_iterator mni(im.node_list().find(uuid));
        if (mni == im.node_list().end() ||
            MessageNodeList::value(mni).operational() == false)
        {
            known_.erase(i);
        }
    }
}

// Forget previous views older than view_forget_timeout_.
void gcomm::evs::Proto::cleanup_views()
{
    gu::datetime::Date now(gu::datetime::Date::monotonic());
    ViewList::iterator i, i_next;
    for (i = previous_views_.begin(); i != previous_views_.end(); i = i_next)
    {
        i_next = i, ++i_next;
        if (i->second + view_forget_timeout_ <= now)
        {
            evs_log_debug(D_STATE) << " erasing view: " << i->first;
            previous_views_.erase(i);
        }
    }
}

// Unevict nodes whose eviction entry has aged past view_forget_timeout_.
void gcomm::evs::Proto::cleanup_evicted()
{
    gu::datetime::Date now(gu::datetime::Date::monotonic());
    Protolay::EvictList::const_iterator i, i_next;
    for (i = evict_list().begin(); i != evict_list().end(); i = i_next)
    {
        i_next = i, ++i_next;
        if (Protolay::EvictList::value(i) + view_forget_timeout_ <= now)
        {
            log_info << "unevicting " << Protolay::EvictList::key(i);
            unevict(Protolay::EvictList::key(i));
        }
    }
}

// Count the known nodes currently marked operational.
size_t gcomm::evs::Proto::n_operational() const
{
    NodeMap::const_iterator i;
    size_t ret = 0;
    for (i = known_.begin(); i != known_.end(); ++i)
    {
        if (i->second.operational() == true) ret++;
    }
    return ret;
}

// Construct and deliver upwards the regular view described by install
// message im. prev_view is used to compute the joined and partitioned sets.
void gcomm::evs::Proto::deliver_reg_view(const InstallMessage& im,
                                         const View& prev_view)
{
    View view(im.version(), im.install_view_id());
    for (MessageNodeList::const_iterator i(im.node_list().begin());
         i != im.node_list().end(); ++i)
    {
        const UUID& uuid(MessageNodeList::key(i));
        const MessageNode& mn(MessageNodeList::value(i));

        // 1) Operational nodes will be members of new view
        // 2) Operational nodes that were not present in previous
        //    view are going also to joined set
        // 3) Leaving nodes go to left set
        // 4) All other nodes present in previous view but not in
        //    member of left set are considered partitioned
        if (mn.operational() == true)
        {
            view.add_member(uuid, mn.segment());
            if (prev_view.is_member(uuid) == false)
            {
                view.add_joined(uuid, mn.segment());
            }
        }
        else if (mn.leaving() == true)
        {
            view.add_left(uuid, mn.segment());
        }
        else
        {
            // Partitioned set is constructed after this loop
        }

        // If node has been evicted, it should have been added to
        // evicted list via JOIN messages.
        assert(mn.evicted() == false || is_evicted(uuid) == true);
    }

    // Loop over previous view and add each node not in new view
    // member of left set as partitioned.
    for (NodeList::const_iterator i(prev_view.members().begin());
         i != prev_view.members().end(); ++i)
    {
        const UUID& uuid(NodeList::key(i));
        const gcomm::Node& mn(NodeList::value(i));
        if (view.is_member(uuid)  == false &&
            view.is_leaving(uuid) == false)
        {
            view.add_partitioned(uuid, mn.segment());
        }
    }

    evs_log_info(I_VIEWS) << "delivering view " << view;

    // This node must be a member of the view it delivers and
    // view id UUID must be of one of the members.
    gcomm_assert(view.is_member(uuid()) == true);
    gcomm_assert(view.is_member(view.id().uuid()) == true)
        << "view id UUID " << view.id().uuid()
        << " not found from reg view members " << view.members()
        << " must abort to avoid possibility of two groups "
        << "with the same view id";

    set_stable_view(view);
    ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view);
    send_up(Datagram(), up_meta);
}

// Construct and deliver upwards the transitional view preceding the view
// described by install message im.
void gcomm::evs::Proto::deliver_trans_view(const InstallMessage& im,
                                           const View& curr_view)
{
    // Trans view is intersection of members in curr_view
    // and members going to be in the next view that come from
    // curr_view according to install message
    View view(current_view_.version(),
              ViewId(V_TRANS,
                     curr_view.id().uuid(),
                     curr_view.id().seq()));
    for (MessageNodeList::const_iterator i(im.node_list().begin());
         i != im.node_list().end(); ++i)
    {
        const UUID& uuid(MessageNodeList::key(i));
        const MessageNode& mn(MessageNodeList::value(i));

        if (curr_view.id()            == mn.view_id() &&
            curr_view.is_member(uuid) == true)
        {
            // 1) Operational nodes go to next view
            // 2) Leaving nodes go to left set
            // 3) All other nodes present in previous view but not in
            //    member of left set are considered partitioned
            if (mn.operational() == true)
            {
                view.add_member(uuid, mn.segment());
            }
            else if (mn.leaving() == true)
            {
                view.add_left(uuid, mn.segment());
            }
            else
            {
                // Partitioned set is constructed after this loop
            }
        }
    }

    // Loop over current view and add each node not in new view
    // member of left set as partitioned.
    for (NodeList::const_iterator i(curr_view.members().begin());
         i != curr_view.members().end(); ++i)
    {
        const UUID& uuid(NodeList::key(i));
        const gcomm::Node& mn(NodeList::value(i));
        if (view.is_member(uuid)  == false &&
            view.is_leaving(uuid) == false)
        {
            view.add_partitioned(uuid, mn.segment());
        }
    }

    // This node must be a member of the view it delivers and
    // if the view is the last transitional, view must have
    // exactly one member and no-one in left set.
    gcomm_assert(view.is_member(uuid()) == true);
    evs_log_info(I_VIEWS) << " delivering view " << view;
    ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view);
    gu_trace(send_up(Datagram(), up_meta));
}

// Deliver an empty view, signalling that this node is no longer a member
// of any group.
void gcomm::evs::Proto::deliver_empty_view()
{
    View view(0, V_REG);
    evs_log_info(I_VIEWS) << "delivering view " << view;
    ProtoUpMeta up_meta(UUID::nil(), ViewId(), &view);
    send_up(Datagram(), up_meta);
}

// Set the committed flag of all known nodes to val.
void gcomm::evs::Proto::setall_committed(bool val)
{
    for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
    {
        NodeMap::value(i).set_committed(val);
    }
}

// Check if commit gaps from all known nodes found from install message have
// been seen.
bool gcomm::evs::Proto::is_all_committed() const
{
    gcomm_assert(install_message_ != 0);
    for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i)
    {
        const UUID& uuid(NodeMap::key(i));
        const Node& inst(NodeMap::value(i));
        if (install_message_->node_list().find(uuid) !=
            install_message_->node_list().end() &&
            inst.operational() == true &&
            inst.committed()   == false)
        {
            return false;
        }
    }
    return true;
}

// Set the installed flag of all known nodes to val.
void gcomm::evs::Proto::setall_installed(bool val)
{
    for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i)
    {
        NodeMap::value(i).set_installed(val);
    }
}

// Check if gaps from new view from all known nodes found from install
// message have been seen.
bool gcomm::evs::Proto::is_all_installed() const { gcomm_assert(install_message_ != 0); for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& uuid(NodeMap::key(i)); const Node& inst(NodeMap::value(i)); if (install_message_->node_list().find(uuid) != install_message_->node_list().end() && inst.operational() == true && inst.installed() == false) { return false; } } return true; } void gcomm::evs::Proto::cleanup_joins() { for (NodeMap::iterator i = known_.begin(); i != known_.end(); ++i) { NodeMap::value(i).set_join_message(0); } } bool gcomm::evs::Proto::is_representative(const UUID& uuid) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { if (NodeMap::value(i).operational() == true && NodeMap::value(i).is_inactive() == false) { assert(NodeMap::value(i).leave_message() == 0); if (NodeMap::value(i).leave_message() != 0) { log_warn << "operational node " << NodeMap::key(i) << " with leave message: " << NodeMap::value(i); continue; } return (uuid == NodeMap::key(i)); } } return false; } bool gcomm::evs::Proto::is_all_suspected(const UUID& uuid) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const Node& node(NodeMap::value(i)); if (node.operational() == true) { const JoinMessage* jm(node.join_message()); if (!jm) return false; const MessageNodeList::const_iterator j(jm->node_list().find(uuid)); if (!(j != jm->node_list().end() && MessageNodeList::value(j).suspected())) return false; } } return true; } ///////////////////////////////////////////////////////////////////////////// // Message sending ///////////////////////////////////////////////////////////////////////////// bool gcomm::evs::Proto::is_flow_control(const seqno_t seq, const seqno_t win) const { gcomm_assert(seq != -1 && win != -1); const seqno_t base(input_map_->safe_seq()); if (seq > base + win) { return true; } return false; } bool gcomm::evs::Proto::request_user_msg_feedback(const gcomm::Datagram& dg) 
const { // Request feedback from peers at least once per 128kB chunk. This will // force the nodes to complete their seqnos. if (bytes_since_request_user_msg_feedback_ + dg.len() >= (size_t(1) << 17)) { evs_log_debug(D_USER_MSGS) << "bytes since request user msg feedback: " << bytes_since_request_user_msg_feedback_ << " dg len: " << dg.len(); return true; } return false; } int gcomm::evs::Proto::send_user(Datagram& dg, uint8_t const user_type, Order const order, seqno_t const win, seqno_t const up_to_seqno, size_t const n_aggregated) { assert(state() == S_LEAVING || state() == S_GATHER || state() == S_OPERATIONAL); assert(dg.offset() == 0); assert(n_aggregated == 1 || output_.size() >= n_aggregated); gcomm_assert(up_to_seqno == -1 || up_to_seqno >= last_sent_); gcomm_assert(up_to_seqno == -1 || win == -1); int ret; const seqno_t seq(last_sent_ + 1); if (win != -1 && is_flow_control(seq, win) == true) { return EAGAIN; } // seq_range max 0xff because of Message seq_range_ field limitation seqno_t seq_range( std::min(up_to_seqno == -1 ? 0 : up_to_seqno - seq, evs::seqno_t(0xff))); seqno_t last_msg_seq(seq + seq_range); uint8_t flags; // If output queue wont contain messages after this patch, // up_to_seqno is given (msg completion) or flow contol would kick in // at next batch, don't set F_MSG_MORE. Also if the number of bytes // in send pipeline exceeds predefined value as reported by // request_user_msg_feedback(), the F_MSG_MORE will not get set. if (output_.size() <= n_aggregated || up_to_seqno != -1 || (win != -1 && (is_flow_control(last_msg_seq + 1, win) || request_user_msg_feedback(dg)))) { flags = 0; bytes_since_request_user_msg_feedback_ = 0; } else { flags = Message::F_MSG_MORE; bytes_since_request_user_msg_feedback_ += dg.len(); } if (n_aggregated > 1) { flags |= Message::F_AGGREGATE; } // Maximize seq range in the case next message batch won't be sent // immediately. 
if ((flags & Message::F_MSG_MORE) == 0 && up_to_seqno == -1) { seq_range = input_map_->max_hs() - seq; seq_range = std::max(static_cast(0), seq_range); seq_range = std::min(static_cast(0xff), seq_range); if (seq_range != 0) { log_debug << "adjusted seq range to: " << seq_range; last_msg_seq = seq + seq_range; } } gcomm_assert(last_msg_seq >= seq && last_msg_seq - seq <= 0xff); gcomm_assert(seq_range >= 0 && seq_range <= 0xff); UserMessage msg(version_, uuid(), current_view_.id(), seq, input_map_->aru_seq(), seq_range, order, ++fifo_seq_, user_type, flags); // Insert first to input map to determine correct aru seq Range range; gu_trace(range = input_map_->insert(NodeMap::value(self_i_).index(), msg, dg)); gcomm_assert(range.hs() == last_msg_seq) << msg << " " << *input_map_ << " " << *this; last_sent_ = last_msg_seq; assert(range.hs() == last_sent_); update_im_safe_seq(NodeMap::value(self_i_).index(), input_map_->aru_seq()); msg.set_aru_seq(input_map_->aru_seq()); evs_log_debug(D_USER_MSGS) << " sending " << msg; gu_trace(push_header(msg, dg)); if ((ret = send_down(dg, ProtoDownMeta())) != 0) { log_debug << "send failed: " << strerror(ret); } gu_trace(pop_header(msg, dg)); sent_msgs_[Message::EVS_T_USER]++; if (delivering_ == false) { gu_trace(deliver()); gu_trace(deliver_local()); } return 0; } size_t gcomm::evs::Proto::aggregate_len() const { bool is_aggregate(false); size_t ret(0); AggregateMessage am; out_queue::const_iterator i(output_.begin()); const Order ord(i->second.order()); ret += i->first.len() + am.serial_size(); for (++i; i != output_.end() && i->second.order() == ord; ++i) { if (ret + i->first.len() + am.serial_size() <= mtu()) { ret += i->first.len() + am.serial_size(); is_aggregate = true; } else { break; } } evs_log_debug(D_USER_MSGS) << "is aggregate " << is_aggregate << " ret " << ret; return (is_aggregate == true ? 
ret : 0); } int gcomm::evs::Proto::send_user(const seqno_t win) { gcomm_assert(output_.empty() == false); gcomm_assert(state() == S_OPERATIONAL); gcomm_assert(win <= send_window_); int ret; size_t alen; if (use_aggregate_ == true && (alen = aggregate_len()) > 0) { // Messages can be aggregated into single message send_buf_.resize(alen); size_t offset(0); size_t n(0); out_queue::const_iterator i(output_.begin()); Order ord(i->second.order()); while ((alen > 0 && i != output_.end())) { const Datagram& dg(i->first); const ProtoDownMeta dm(i->second); AggregateMessage am(0, dg.len(), dm.user_type()); gcomm_assert(alen >= dg.len() + am.serial_size()); gu_trace(offset = am.serialize(&send_buf_[0], send_buf_.size(), offset)); std::copy(dg.header() + dg.header_offset(), dg.header() + dg.header_size(), &send_buf_[0] + offset); offset += (dg.header_len()); std::copy(dg.payload().begin(), dg.payload().end(), &send_buf_[0] + offset); offset += dg.payload().size(); alen -= dg.len() + am.serial_size(); ++n; ++i; } Datagram dg(gu::SharedBuffer(new gu::Buffer(send_buf_.begin(), send_buf_.end()))); if ((ret = send_user(dg, 0xff, ord, win, -1, n)) == 0) { while (n-- > 0) { output_.pop_front(); } } } else { std::pair wb(output_.front()); if ((ret = send_user(wb.first, wb.second.user_type(), wb.second.order(), win, -1)) == 0) { output_.pop_front(); } } return ret; } void gcomm::evs::Proto::complete_user(const seqno_t high_seq) { gcomm_assert(state() == S_OPERATIONAL || state() == S_GATHER); evs_log_debug(D_USER_MSGS) << "completing seqno to " << high_seq;; Datagram wb; int err; err = send_user(wb, 0xff, O_DROP, -1, high_seq); if (err != 0) { log_debug << "failed to send completing msg " << strerror(err) << " seq=" << high_seq << " send_window=" << send_window_ << " last_sent=" << last_sent_; } } int gcomm::evs::Proto::send_delegate(Datagram& wb, const UUID& target) { DelegateMessage dm(version_, uuid(), current_view_.id(), ++fifo_seq_); push_header(dm, wb); int ret = send_down(wb, 
ProtoDownMeta(target)); pop_header(dm, wb); sent_msgs_[Message::EVS_T_DELEGATE]++; return ret; } bool gcomm::evs::Proto::gap_rate_limit(const UUID& target, const Range& range) const { NodeMap::const_iterator target_i(known_.find(target)); // Sanity check: The target should always be in the set // of known nodes. If it is not, skip sending the gap message // in production. assert(target_i != known_.end()); if (target_i == known_.end()) { return true; } const Node& target_node(target_i->second); // Limit requesting ranges with the same highest seen within // 50msec period. gu::datetime::Date now(gu::datetime::Date::monotonic()); if (now < target_node.last_requested_range_tstamp() + gu::datetime::MSec*100) { evs_log_debug(D_GAP_MSGS) << "Rate limiting gap: now " << now << " requested range tstamp: " << target_node.last_requested_range_tstamp() << " requested range: " << target_node.last_requested_range(); return true; } return false; } void gcomm::evs::Proto::send_gap(EVS_CALLER_ARG, const UUID& range_uuid, const ViewId& source_view_id, const Range range, const bool commit) { assert(range_uuid == UUID::nil()); assert(range.is_empty()); gcomm_assert((commit == false && source_view_id == current_view_.id()) || install_message_ != 0); uint8_t flags(0); if (commit == true) flags |= Message::F_COMMIT; GapMessage gm(version_, uuid(), source_view_id, (source_view_id == current_view_.id() ? last_sent_ : (commit == true ? install_message_->fifo_seq() : -1)), (source_view_id == current_view_.id() ? 
input_map_->aru_seq() : -1), ++fifo_seq_, range_uuid, range, flags); evs_log_debug(D_GAP_MSGS) << EVS_LOG_METHOD << gm; gu::Buffer buf; serialize(gm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta(range_uuid)); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::EVS_T_GAP]++; gu_trace(handle_gap(gm, self_i_)); } void gcomm::evs::Proto::populate_node_list(MessageNodeList* node_list) const { for (NodeMap::const_iterator i = known_.begin(); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); MessageNode mnode(node.operational(), node.suspected(), is_evicted(node_uuid)); if (node_uuid != uuid()) { const JoinMessage* jm(node.join_message()); const LeaveMessage* lm(node.leave_message()); // if (jm != 0) { const ViewId& nsv(jm->source_view_id()); const MessageNode& mn(MessageNodeList::value(jm->node_list().find_checked(node_uuid))); mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), -1, jm->source_view_id(), (nsv == current_view_.id() ? input_map_->safe_seq(node.index()) : mn.safe_seq()), (nsv == current_view_.id() ? input_map_->range(node.index()) : mn.im_range())); } else if (lm != 0) { const ViewId& nsv(lm->source_view_id()); mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), lm->seq(), nsv, (nsv == current_view_.id() ? input_map_->safe_seq(node.index()) : -1), (nsv == current_view_.id() ? 
input_map_->range(node.index()) : Range())); } else if (current_view_.is_member(node_uuid) == true) { mnode = MessageNode(node.operational(), node.is_suspected(), node.segment(), is_evicted(node_uuid), -1, current_view_.id(), input_map_->safe_seq(node.index()), input_map_->range(node.index())); } } else { mnode = MessageNode(true, false, node.segment(), is_evicted(node_uuid), -1, current_view_.id(), input_map_->safe_seq(node.index()), input_map_->range(node.index())); } gu_trace((void)node_list->insert_unique(std::make_pair(node_uuid, mnode))); } // Iterate over evicted_list and add evicted nodes not yet in node list. for (Protolay::EvictList::const_iterator i(evict_list().begin()); i != evict_list().end(); ++i) { if (node_list->find(Protolay::EvictList::key(i)) == node_list->end()) { // default arguments are evil. MessageNode mnode(false, false, 0, true); gu_trace((void)node_list->insert_unique( std::make_pair(Protolay::EvictList::key(i), mnode))); } } evs_log_debug(D_CONSENSUS) << "populate node list:\n" << *node_list; } const gcomm::evs::JoinMessage& gcomm::evs::Proto::create_join() { MessageNodeList node_list; gu_trace(populate_node_list(&node_list)); JoinMessage jm(version_, uuid(), current_view_.id(), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); NodeMap::value(self_i_).set_join_message(&jm); evs_log_debug(D_JOIN_MSGS) << " created join message " << jm; return *NodeMap::value(self_i_).join_message(); } void gcomm::evs::Proto::set_join(const JoinMessage& jm, const UUID& source) { NodeMap::iterator i; gu_trace(i = known_.find_checked(source)); NodeMap::value(i).set_join_message(&jm);; } void gcomm::evs::Proto::set_leave(const LeaveMessage& lm, const UUID& source) { NodeMap::iterator i; gu_trace(i = known_.find_checked(source)); Node& inst(NodeMap::value(i)); if (inst.leave_message()) { evs_log_debug(D_LEAVE_MSGS) << "Duplicate leave:\told: " << *inst.leave_message() << "\tnew: " << lm; } else { inst.set_leave_message(&lm); } } void 
gcomm::evs::Proto::send_join(bool handle) { assert(output_.empty() == true); JoinMessage jm(create_join()); // Allow connections for all members that may be accepted // in the next view. for (const auto& node : jm.node_list()) { if (node.second.operational() && not node.second.suspected() && not node.second.evicted()) { allow_connect(node.first); } } gu::Buffer buf; serialize(jm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); } else { last_sent_join_tstamp_ = gu::datetime::Date::monotonic(); } sent_msgs_[Message::EVS_T_JOIN]++; if (handle == true) { handle_join(jm, self_i_); } } void gcomm::evs::Proto::send_leave(bool handle) { gcomm_assert(state() == S_LEAVING); /* Move all pending messages from output to input map */ while (output_.empty() == false) { std::pair wb = output_.front(); if (send_user(wb.first, wb.second.user_type(), wb.second.order(), -1, -1) != 0) { gu_throw_fatal << "send_user() failed"; } output_.pop_front(); } LeaveMessage lm(version_, uuid(), current_view_.id(), last_sent_, input_map_->aru_seq(), ++fifo_seq_); evs_log_debug(D_LEAVE_MSGS) << "sending leave msg " << lm; gu::Buffer buf; serialize(lm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed " << strerror(err); } sent_msgs_[Message::EVS_T_LEAVE]++; if (handle == true) { handle_leave(lm, self_i_); } } struct ViewIdCmp { bool operator()(const gcomm::evs::NodeMap::value_type& a, const gcomm::evs::NodeMap::value_type& b) const { using gcomm::evs::NodeMap; gcomm_assert(NodeMap::value(a).join_message() != 0 && NodeMap::value(b).join_message() != 0); return (NodeMap::value(a).join_message()->source_view_id().seq() < NodeMap::value(b).join_message()->source_view_id().seq()); } }; struct ProtoVerCmp { bool operator()(const gcomm::evs::NodeMap::value_type& a, const gcomm::evs::NodeMap::value_type& b) const { using gcomm::evs::NodeMap; 
gcomm_assert(NodeMap::value(a).join_message() != 0 && NodeMap::value(b).join_message() != 0); return (NodeMap::value(a).join_message()->version() < NodeMap::value(b).join_message()->version()); } }; void gcomm::evs::Proto::send_install(EVS_CALLER_ARG) { gcomm_assert(consensus_.is_consensus() == true && is_representative(uuid()) == true) << *this; // Select list of operational nodes from known NodeMap oper_list; for_each(known_.begin(), known_.end(), OperationalSelect(oper_list)); NodeMap::const_iterator max_node = max_element(oper_list.begin(), oper_list.end(), ViewIdCmp()); // Compute maximum known view id seq max_view_id_seq_ = std::max(max_view_id_seq_, NodeMap::value(max_node).join_message()->source_view_id().seq()); // Compute highest commonly supported protocol version. // Oper_list is non-empty, join message existence is asserted. const int version( NodeMap::value( std::min_element(oper_list.begin(), oper_list.end(), ProtoVerCmp())).join_message()->version()); MessageNodeList node_list; populate_node_list(&node_list); InstallMessage imsg(version, uuid(), current_view_.id(), ViewId(V_REG, uuid(), max_view_id_seq_ + attempt_seq_), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); ++attempt_seq_; evs_log_debug(D_INSTALL_MSGS) << EVS_LOG_METHOD << imsg; evs_log_info(I_STATE) << "sending install message" << imsg; gcomm_assert(consensus_.is_consistent(imsg)); gu::Buffer buf; serialize(imsg, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta()); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::EVS_T_INSTALL]++; handle_install(imsg, self_i_); } void gcomm::evs::Proto::send_delayed_list() { DelayedListMessage elm(version_, uuid(), current_view_.id(), ++fifo_seq_); for (DelayedList::const_iterator i(delayed_list_.begin()); i != delayed_list_.end(); ++i) { elm.add(i->first, i->second.state_change_cnt()); } gu::Buffer buf; serialize(elm, buf); Datagram dg(buf); (void)send_down(dg, ProtoDownMeta()); 
handle_delayed_list(elm, self_i_); } void gcomm::evs::Proto::resend(const UUID& gap_source, const Range range) { gcomm_assert(gap_source != uuid()); gcomm_assert(range.lu() <= range.hs()) << "lu (" << range.lu() << ") > hs(" << range.hs() << ")"; if (range.lu() <= input_map_->safe_seq()) { evs_log_debug(D_RETRANS) << self_string() << "lu (" << range.lu() << ") <= safe_seq(" << input_map_->safe_seq() << "), can't recover message"; return; } evs_log_debug(D_RETRANS) << " retrans requested by " << gap_source << " " << range.lu() << " -> " << range.hs(); // All of the nodes have received all messages up to input_map_->safe_seq(), // therefore it does not make sense to retransmit anything below that. seqno_t seq(std::max(range.lu(), input_map_->safe_seq() + 1)); evs_log_debug(D_RETRANS) << "retransmitting from " << seq; while (seq <= range.hs()) { InputMap::iterator msg_i = input_map_->find( NodeMap::value(self_i_).index(), seq); if (msg_i == input_map_->end()) { try { gu_trace(msg_i = input_map_->recover( NodeMap::value(self_i_).index(), seq)); } catch (...) 
{ evs_log_debug(D_RETRANS) << "could not recover message " << gap_source << ":" << seq; seq = seq + 1; continue; } } const UserMessage& msg(InputMapMsgIndex::value(msg_i).msg()); gcomm_assert(msg.source() == uuid()); Datagram rb(InputMapMsgIndex::value(msg_i).rb()); assert(rb.offset() == 0); UserMessage um(msg.version(), msg.source(), msg.source_view_id(), msg.seq(), input_map_->aru_seq(), msg.seq_range(), msg.order(), msg.fifo_seq(), msg.user_type(), static_cast( Message::F_RETRANS | (msg.flags() & Message::F_AGGREGATE))); push_header(um, rb); int err = send_down(rb, ProtoDownMeta(gap_source)); if (err != 0) { log_debug << "send failed: " << strerror(err); break; } else { evs_log_debug(D_RETRANS) << "retransmitted " << um; } seq = seq + msg.seq_range() + 1; retrans_msgs_++; } } void gcomm::evs::Proto::recover(const UUID& gap_source, const UUID& range_uuid, const Range range) { gcomm_assert(gap_source != uuid()) << "gap_source (" << gap_source << ") == uuid() (" << uuid() << " state " << *this; gcomm_assert(range.lu() <= range.hs()) << "lu (" << range.lu() << ") > hs (" << range.hs() << ")"; if (range.lu() <= input_map_->safe_seq()) { evs_log_debug(D_RETRANS) << "lu (" << range.lu() << ") <= safe_seq(" << input_map_->safe_seq() << "), can't recover message"; return; } const Node& range_node(NodeMap::value(known_.find_checked(range_uuid))); const Range im_range(input_map_->range(range_node.index())); evs_log_debug(D_RETRANS) << " recovering message from " << range_uuid << " requested by " << gap_source << " requested range " << range << " available " << im_range; // All of the nodes have received all messages up to input_map_->safe_seq(), // therefore it does not make sense to retransmit anything below that. 
seqno_t seq(std::max(range.lu(), input_map_->safe_seq() + 1)); evs_log_debug(D_RETRANS) << "recovering from " << seq; size_t n_recovered(0); while (seq <= range.hs() && seq <= im_range.hs()) { InputMap::iterator msg_i = input_map_->find(range_node.index(), seq); if (msg_i == input_map_->end()) { try { gu_trace(msg_i = input_map_->recover(range_node.index(), seq)); } catch (...) { seq = seq + 1; continue; } } const UserMessage& msg(InputMapMsgIndex::value(msg_i).msg()); assert(msg.source() == range_uuid); Datagram rb(InputMapMsgIndex::value(msg_i).rb()); assert(rb.offset() == 0); UserMessage um(msg.version(), msg.source(), msg.source_view_id(), msg.seq(), msg.aru_seq(), msg.seq_range(), msg.order(), msg.fifo_seq(), msg.user_type(), static_cast( Message::F_SOURCE | Message::F_RETRANS | (msg.flags() & Message::F_AGGREGATE))); push_header(um, rb); ++n_recovered; int err = send_delegate(rb, gap_source); if (err != 0) { log_debug << "send failed: " << strerror(err); break; } else { evs_log_debug(D_RETRANS) << "recover " << um; } seq = seq + msg.seq_range() + 1; recovered_msgs_++; } evs_log_debug(D_RETRANS) << "recovered: " << n_recovered; } class UUIDFixedPartCmp { public: UUIDFixedPartCmp(const gcomm::UUID& uuid) : uuid_(uuid) { } bool operator()(const gcomm::evs::NodeMap::value_type& vt) const { return uuid_.fixed_part_matches(vt.first); } private: const gcomm::UUID& uuid_; }; void gcomm::evs::Proto::handle_foreign(const Message& msg) { // no need to handle foreign LEAVE message if (msg.type() == Message::EVS_T_LEAVE) { return; } // Don't handle foreign messages in install phase. // This includes not only INSTALL state, but also // GATHER state after receiving install message. 
if (install_message_ != 0) { evs_log_debug(D_FOREIGN_MSGS) << " dropping foreign message from " << msg.source() << " in install state"; return; } if (is_msg_from_previous_view(msg) == true) { return; } const UUID& source(msg.source()); if (source == UUID::nil()) { log_warn << "Received message with nil source UUID, dropping"; return; } NodeMap::iterator i; if ((i = std::find_if(known_.begin(), known_.end(), UUIDFixedPartCmp(source))) != known_.end()) { // Keep the new incarnation out of the group until a new view has been // established. evs_log_debug(D_FOREIGN_MSGS) << "Dropping message from new incarnation of already known " "node in current view, old: " << i->first << " new: " << source; return; } // When joining, wait until at least one of the existing node sees // a join message from joining node. This is to reduce the probability // of install timeouts because of already ongoing cluster configuration // changes. const bool is_join_message_with_self = msg.type() == Message::EVS_T_JOIN && msg.node_list().find(my_uuid_) != msg.node_list().end(); if (state() == S_JOINING && not is_join_message_with_self) { evs_log_debug(D_FOREIGN_MSGS) << "Join message without self in S_JOINING state, dropping message"; return; } evs_log_info(I_STATE) << " detected new message source " << source; gu_trace(i = known_.insert_unique( std::make_pair(source, Node(*this)))); assert(NodeMap::value(i).operational() == true); if (state() == S_JOINING || state() == S_GATHER || state() == S_OPERATIONAL) { evs_log_info(I_STATE) << " shift to GATHER due to foreign message from " << msg.source(); gu_trace(shift_to(S_GATHER, false)); // Reset install timer each time foreign message is seen to // synchronize install timers. 
reset_timer(T_INSTALL); } // Set join message after shift to recovery, shift may clean up // join messages if (msg.type() == Message::EVS_T_JOIN) { set_join(static_cast(msg), msg.source()); } send_join(true); } void gcomm::evs::Proto::handle_msg(const Message& msg, const Datagram& rb, bool direct) { assert(msg.type() <= Message::EVS_T_DELAYED_LIST); if (msg.type() > Message::EVS_T_DELAYED_LIST) { return; } if (state() == S_CLOSED) { return; } if (isolation_end_ != gu::datetime::Date::zero()) { evs_log_debug(D_STATE) << " dropping message due to isolation"; // Isolation period is on return; } if (msg.source() == uuid()) { evs_log_debug(D_FOREIGN_MSGS) << " dropping own message"; return; } if (msg.version() > GCOMM_PROTOCOL_MAX_VERSION) { log_info << "incompatible protocol version " << static_cast(msg.version()); return; } gcomm_assert(msg.source() != UUID::nil()); // Figure out if the message is from known source NodeMap::iterator ii = known_.find(msg.source()); if (ii == known_.end()) { gu_trace(handle_foreign(msg)); return; } Node& node(NodeMap::value(ii)); if (direct == true) { node.set_seen_tstamp(gu::datetime::Date::monotonic()); } if (state() == S_LEAVING && msg.source_view_id() == current_view_.id()) { // Allow messages in leaving state. This is needed for both // updating the join messages for retransmission and for handling // retransmitted messages. evs_log_debug(D_FOREIGN_MSGS) << "Allow message from current view " << "in leaving state" << msg; } else if (node.operational() == false && node.leave_message() == 0 && (msg.flags() & Message::F_RETRANS) == 0) { // We have set this node unoperational and there was // probably good reason to do so. Don't accept messages // from it before new view has been formed. // Exceptions: // - Node that is leaving // - Retransmitted messages. // why we accept retransimted messages? 
// a node sends a message, some nodes(A) get it, but some(B) don't // then this node is non-operational(or unreachable) // so A need to send B the missing message(in envelope as delegate message) // otherwise the input map will not be consistent forever. // and user message in delegate message always comes with F_RETRANS flag. evs_log_debug(D_FOREIGN_MSGS) << " dropping message from unoperational source " << node; return; } // Filter out non-fifo messages if (msg.fifo_seq() != -1 && (msg.flags() & Message::F_RETRANS) == 0) { if (node.fifo_seq() >= msg.fifo_seq()) { evs_log_debug(D_FOREIGN_MSGS) << "droppoing non-fifo message " << msg << " fifo seq " << node.fifo_seq(); return; } else { node.set_fifo_seq(msg.fifo_seq()); } } // Accept non-membership messages only from current view // or from view to be installed if (msg.is_membership() == false && msg.source_view_id() != current_view_.id() && (install_message_ == 0 || install_message_->install_view_id() != msg.source_view_id())) { // If source node seems to be operational but it has proceeded // into new view, mark it as unoperational in order to create // intermediate views before re-merge. if (node.installed() == true && node.operational() == true && is_msg_from_previous_view(msg) == false && state() != S_LEAVING) { if (new_view_logged_ == false) { evs_log_info(I_STATE) << " detected new view from operational source " << msg.source() << ": " << msg.source_view_id(); new_view_logged_ = true; } // Note: Commented out, this causes problems with // attempt_seq. Newly (remotely?) generated install message // followed by commit gap may cause undesired // node inactivation and shift to gather. 
// // set_inactive(msg.source()); // gu_trace(shift_to(S_GATHER, true)); } evs_log_debug(D_FOREIGN_MSGS) << "dropping non-membership message from foreign view"; return; } else if (NodeMap::value(ii).index() == Node::invalid_index && msg.source_view_id() == current_view_.id()) { log_warn << "Message from node that claims to come from same view but is not in current view " << msg; assert(0); return; } recvd_msgs_[msg.type()]++; switch (msg.type()) { case Message::EVS_T_USER: gu_trace(handle_user(static_cast(msg), ii, rb)); break; case Message::EVS_T_DELEGATE: gu_trace(handle_delegate(static_cast(msg), ii, rb)); break; case Message::EVS_T_GAP: gu_trace(handle_gap(static_cast(msg), ii)); break; case Message::EVS_T_JOIN: gu_trace(handle_join(static_cast(msg), ii)); break; case Message::EVS_T_LEAVE: gu_trace(handle_leave(static_cast(msg), ii)); break; case Message::EVS_T_INSTALL: gu_trace(handle_install(static_cast(msg), ii)); break; case Message::EVS_T_DELAYED_LIST: gu_trace(handle_delayed_list( static_cast(msg), ii)); break; default: log_warn << "invalid message type " << msg.type(); } } //////////////////////////////////////////////////////////////////////// // Protolay interface //////////////////////////////////////////////////////////////////////// std::pair, size_t> gcomm::evs::Proto::unserialize_message(const UUID& source, const Datagram& rb) { size_t offset = 0; const gu::byte_t* begin(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); std::unique_ptr ret; switch (Message::get_type(begin, available, offset)) { case Message::EVS_T_NONE: gu_throw_fatal; break; case Message::EVS_T_USER: ret = std::unique_ptr(new UserMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; case Message::EVS_T_DELEGATE: ret = std::unique_ptr(new DelegateMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; case Message::EVS_T_GAP: ret = std::unique_ptr(new GapMessage); gu_trace(offset = ret->unserialize(begin, available, 
offset)); break; case Message::EVS_T_JOIN: ret = std::unique_ptr(new JoinMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; case Message::EVS_T_INSTALL: ret = std::unique_ptr(new InstallMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; case Message::EVS_T_LEAVE: ret = std::unique_ptr(new LeaveMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; case Message::EVS_T_DELAYED_LIST: ret = std::unique_ptr(new DelayedListMessage); gu_trace(offset = ret->unserialize(begin, available, offset)); break; default: return {std::unique_ptr{}, 0}; } /* Message did not have source field, must be set from source reported by the lower layer. */ if ((ret->flags() & Message::F_SOURCE) == 0) { assert(source != UUID::nil()); gcomm_assert(source != UUID::nil()); ret->set_source(source); } return {std::move(ret), offset + rb.offset()}; } void gcomm::evs::Proto::handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (state() == S_CLOSED || um.source() == uuid() || is_evicted(um.source())) { // Silent drop return; } gcomm_assert(um.source() != UUID::nil()); std::pair, size_t> msg; try { gu_trace(msg = unserialize_message(um.source(), rb)); if (not msg.first) { /* Message could not be serialized. 
*/ return; } handle_msg(*msg.first, Datagram(rb, msg.second), (msg.first->flags() & Message::F_RETRANS) == 0); } catch (gu::Exception& e) { switch (e.get_errno()) { case EPROTONOSUPPORT: log_warn << e.what(); break; case EINVAL: log_warn << "invalid message: " << *msg.first; break; default: log_fatal << "exception caused by message: " << *msg.first; std::cerr << " state after handling message: " << *this; throw; } } } int gcomm::evs::Proto::handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (state() == S_GATHER || state() == S_INSTALL) { return EAGAIN; } else if (state() != S_OPERATIONAL) { return ENOTCONN; } if (dm.order() == O_LOCAL_CAUSAL) { gu::datetime::Date now(gu::datetime::Date::monotonic()); if (causal_queue_.empty() == true && last_sent_ == input_map_->safe_seq() && causal_keepalive_period_ > gu::datetime::Period(0) && last_causal_keepalive_ + causal_keepalive_period_ > now) { assert(last_sent_ == input_map_->aru_seq()); // Input map should either be empty (all messages // delivered) or the undelivered messages have higher // seqno than safe_seq. Even if the delivry is // done below if needed, this assertion should stay // to catch errors in logic elsewhere in the code. assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); if (input_map_->begin() != input_map_->end() && input_map_->is_safe(input_map_->begin()) == true) { gu_trace(deliver()); if (input_map_->begin() != input_map_->end() && input_map_->is_safe(input_map_->begin()) == true) { // If the input map state is still not good for fast path, // the situation is not likely to clear immediately. Return // error to retry later. 
return EAGAIN; } } hs_local_causal_.insert(0.0); deliver_causal(dm.user_type(), last_sent_, wb); } else { seqno_t causal_seqno(input_map_->aru_seq()); if (causal_keepalive_period_ == gu::datetime::Period(0) || last_causal_keepalive_ + causal_keepalive_period_ <= now) { // generate traffic to make sure that group is live Datagram dg; int err(send_user(dg, 0xff, O_DROP, -1, -1)); if (err != 0) { return err; } // reassign causal_seqno to be last_sent: // in order to make sure that the group is live, // safe seqno must be advanced and in this case // safe seqno equals to aru seqno. causal_seqno = last_sent_; last_causal_keepalive_ = now; } causal_queue_.push_back(CausalMessage(dm.user_type(), causal_seqno, wb)); } return 0; } // Limit outbound bytes to out_queue::max_outbound_bytes (1MB) // to limit the time it takes to transmit all outbound messages // during configuration change. if (output_.outbound_bytes() >= out_queue::max_outbound_bytes) { return EAGAIN; } send_queue_s_ += output_.size(); ++n_send_queue_s_; int ret = 0; if (output_.empty() == true) { int err; err = send_user(wb, dm.user_type(), dm.order(), user_send_window_, -1); switch (err) { case EAGAIN: output_.push_back(std::make_pair(wb, dm)); // fall through case 0: ret = 0; break; default: log_error << "send error: " << err; ret = err; } } else { output_.push_back(std::make_pair(wb, dm)); } return ret; } int gcomm::evs::Proto::send_down(Datagram& dg, const ProtoDownMeta& dm) { if (isolation_end_ != gu::datetime::Date::zero()) { // Node has isolated itself, don't emit any messages return 0; } else { return Protolay::send_down(dg, dm); } } ///////////////////////////////////////////////////////////////////////////// // State handler ///////////////////////////////////////////////////////////////////////////// void gcomm::evs::Proto::shift_to(const State s, const bool send_j) { if (shift_to_rfcnt_ > 0) gu_throw_fatal << *this; shift_to_rfcnt_++; static const bool allowed[S_MAX][S_MAX] = { // CLOSED JOINING 
LEAVING GATHER INSTALL OPERAT { false, true, false, false, false, false }, // CLOSED { false, false, true, true, false, false }, // JOINING { true, false, false, false, false, false }, // LEAVING { false, false, true, true, true, false }, // GATHER { false, false, false, true, false, true }, // INSTALL { false, false, true, true, false, false } // OPERATIONAL }; assert(s < S_MAX); if (allowed[state_][s] == false) { gu_throw_fatal << "Forbidden state transition: " << to_string(state_) << " -> " << to_string(s); } if (state() != s) { evs_log_info(I_STATE) << " state change: " << to_string(state_) << " -> " << to_string(s); } switch (s) { case S_CLOSED: { gcomm_assert(state() == S_LEAVING); gu_trace(deliver()); gu_trace(deliver_local()); setall_installed(false); NodeMap::value(self_i_).set_installed(true); // Construct install message containing only one node for // last trans view. MessageNodeList node_list; (void)node_list.insert_unique( std::make_pair(uuid(), MessageNode(true, false, NodeMap::value(self_i_).segment(), false, -1, current_view_.id(), input_map_->safe_seq( NodeMap::value(self_i_).index()), input_map_->range( NodeMap::value(self_i_).index())))); InstallMessage im(0, uuid(), current_view_.id(), ViewId(V_REG, uuid(), current_view_.id().seq() + 1), input_map_->safe_seq(), input_map_->aru_seq(), ++fifo_seq_, node_list); gu_trace(deliver_trans_view(im, current_view_)); gu_trace(deliver_trans()); gu_trace(deliver_local(true)); gcomm_assert(causal_queue_.empty() == true); if (collect_stats_ == true) { handle_stats_timer(); } gu_trace(deliver_empty_view()); cleanup_foreign(im); cleanup_views(); timers_.clear(); state_ = S_CLOSED; break; } case S_JOINING: state_ = S_JOINING; reset_timer(T_RETRANS); reset_timer(T_STATS); break; case S_LEAVING: state_ = S_LEAVING; reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); reset_timer(T_INSTALL); break; case S_GATHER: { setall_committed(false); setall_installed(false); delete install_message_; install_message_ = 0; if 
(state() == S_OPERATIONAL) { while (output_.empty() == false) { int err; gu_trace(err = send_user(-1)); if (err != 0) { gu_throw_fatal << self_string() << "send_user() failed in shifto " << "to S_GATHER: " << strerror(err); } } } else { gcomm_assert(output_.empty() == true); } State prev_state(state_); state_ = S_GATHER; if (send_j == true) { gu_trace(send_join(false)); } gcomm_assert(state() == S_GATHER); reset_timer(T_INACTIVITY); if (prev_state == S_OPERATIONAL || prev_state == S_JOINING) { reset_timer(T_RETRANS); reset_timer(T_INSTALL); } break; } case S_INSTALL: { gcomm_assert(install_message_ != 0); gcomm_assert(is_all_committed() == true); state_ = S_INSTALL; reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); break; } case S_OPERATIONAL: { gcomm_assert(output_.empty() == true); gcomm_assert(install_message_ != 0); gcomm_assert(NodeMap::value(self_i_).join_message() != 0 && consensus_.equal( *NodeMap::value(self_i_).join_message(), *install_message_)) << "install message not consistent with own join, state: " << *this; gcomm_assert(is_all_installed() == true); gu_trace(deliver()); gu_trace(deliver_local()); gu_trace(deliver_trans_view(*install_message_, current_view_)); gu_trace(deliver_trans()); gu_trace(deliver_local(true)); gcomm_assert(causal_queue_.empty() == true); input_map_->clear(); if (collect_stats_ == true) { handle_stats_timer(); } // End of previous view // Construct new view and shift to S_OPERATIONAL before calling // deliver_reg_view(). Reg view delivery may trigger message // exchange on upper layer and operating view is needed to // handle messages. 
previous_view_ = current_view_; std::copy(gather_views_.begin(), gather_views_.end(), std::inserter(previous_views_, previous_views_.end())); gather_views_.clear(); if (install_message_->version() > current_view_.version()) { log_info << "EVS version upgrade " << current_view_.version() << " -> " << static_cast(install_message_->version()); } else if (install_message_->version() < current_view_.version()) { log_info << "EVS version downgrade " << current_view_.version() << " -> " << static_cast(install_message_->version()); } current_view_ = View(install_message_->version(), install_message_->install_view_id()); size_t idx = 0; const MessageNodeList& imnl(install_message_->node_list()); for (MessageNodeList::const_iterator i(imnl.begin()); i != imnl.end(); ++i) { const UUID& uuid(MessageNodeList::key(i)); const MessageNode& n(MessageNodeList::value(i)); // Add operational nodes to new view, assign input map index NodeMap::iterator nmi(known_.find(uuid)); gcomm_assert(nmi != known_.end()) << "node " << uuid << " not found from known map"; if (n.operational() == true) { current_view_.add_member(uuid, NodeMap::value(nmi).segment()); NodeMap::value(nmi).set_index(idx++); } else { NodeMap::value(nmi).set_index( Node::invalid_index); } } if (previous_view_.id().type() == V_REG && previous_view_.members() == current_view_.members()) { evs_log_info(I_VIEWS) << "subsequent views have same members, prev view " << previous_view_ << " current view " << current_view_; } input_map_->reset(current_view_.members().size()); last_sent_ = -1; state_ = S_OPERATIONAL; deliver_reg_view(*install_message_, previous_view_); cleanup_foreign(*install_message_); cleanup_views(); cleanup_joins(); delete install_message_; install_message_ = 0; attempt_seq_ = 1; install_timeout_count_ = 0; gu_trace(send_gap(EVS_CALLER, UUID::nil(), current_view_.id(), Range()));; gcomm_assert(state() == S_OPERATIONAL); reset_timer(T_INACTIVITY); reset_timer(T_RETRANS); cancel_timer(T_INSTALL); new_view_logged_ = 
false; break; } default: gu_throw_fatal << "invalid state"; } shift_to_rfcnt_--; } //////////////////////////////////////////////////////////////////////////// // Message delivery //////////////////////////////////////////////////////////////////////////// void gcomm::evs::Proto::deliver_causal(uint8_t user_type, seqno_t seqno, const Datagram& datagram) { send_up(datagram, ProtoUpMeta(uuid(), current_view_.id(), 0, user_type, O_LOCAL_CAUSAL, seqno)); ++delivered_msgs_[O_LOCAL_CAUSAL]; } void gcomm::evs::Proto::deliver_local(bool trans) { // local causal const seqno_t causal_seq(trans == false ? input_map_->safe_seq() : last_sent_); gu::datetime::Date now(gu::datetime::Date::monotonic()); assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); while (causal_queue_.empty() == false && causal_queue_.front().seqno() <= causal_seq) { const CausalMessage& cm(causal_queue_.front()); hs_local_causal_.insert(double(now.get_utc() - cm.tstamp().get_utc())/gu::datetime::Sec); deliver_causal(cm.user_type(), cm.seqno(), cm.datagram()); causal_queue_.pop_front(); } } void gcomm::evs::Proto::validate_reg_msg(const UserMessage& msg) { if (msg.source_view_id() != current_view_.id()) { // Note: This implementation should guarantee same view delivery, // this is sanity check for that. 
gu_throw_fatal << "reg validate: not current view"; } // Update statistics for locally generated messages if (msg.source() == uuid()) { if (msg.order() == O_SAFE) { gu::datetime::Date now(gu::datetime::Date::monotonic()); double lat(double(now.get_utc() - msg.tstamp().get_utc())/ gu::datetime::Sec); if (info_mask_ & I_STATISTICS) hs_safe_.insert(lat); safe_deliv_latency_.insert(lat); } else if (msg.order() == O_AGREED) { if (info_mask_ & I_STATISTICS) { gu::datetime::Date now(gu::datetime::Date::monotonic()); hs_agreed_.insert(double(now.get_utc() - msg.tstamp().get_utc())/gu::datetime::Sec); } } } } void gcomm::evs::Proto::deliver_finish(const InputMapMsg& msg) { if ((msg.msg().flags() & Message::F_AGGREGATE) == 0) { ++delivered_msgs_[msg.msg().order()]; if (msg.msg().order() != O_DROP) { gu_trace(validate_reg_msg(msg.msg())); ProtoUpMeta um(msg.msg().source(), msg.msg().source_view_id(), 0, msg.msg().user_type(), msg.msg().order(), msg.msg().seq()); try { send_up(msg.rb(), um); } catch (...) 
{ log_info << msg.msg() << " " << msg.rb().len(); throw; } } } else { gu_trace(validate_reg_msg(msg.msg())); size_t offset(0); while (offset < msg.rb().len()) { ++delivered_msgs_[msg.msg().order()]; AggregateMessage am; gu_trace(am.unserialize(msg.rb().payload().data(), msg.rb().payload().size(), offset)); Datagram dg( gu::SharedBuffer( new gu::Buffer( msg.rb().payload().data() + offset + am.serial_size(), msg.rb().payload().data() + offset + am.serial_size() + am.len()))); ProtoUpMeta um(msg.msg().source(), msg.msg().source_view_id(), 0, am.user_type(), msg.msg().order(), msg.msg().seq()); gu_trace(send_up(dg, um)); offset += am.serial_size() + am.len(); } gcomm_assert(offset == msg.rb().len()); } } void gcomm::evs::Proto::deliver() { if (delivering_ == true) { gu_throw_fatal << "Recursive enter to delivery"; } delivering_ = true; if (state() != S_OPERATIONAL && state() != S_GATHER && state() != S_INSTALL && state() != S_LEAVING) { gu_throw_fatal << "invalid state: " << to_string(state()); } evs_log_debug(D_DELIVERY) << " aru_seq=" << input_map_->aru_seq() << " safe_seq=" << input_map_->safe_seq(); // Read input map head until a message which cannot be // delivered is enountered. 
InputMapMsgIndex::iterator i; while ((i = input_map_->begin()) != input_map_->end()) { const InputMapMsg& msg(InputMapMsgIndex::value(i)); if ((msg.msg().order() <= O_SAFE && input_map_->is_safe(i) == true) || (msg.msg().order() <= O_AGREED && input_map_->is_agreed(i) == true) || (msg.msg().order() <= O_FIFO && input_map_->is_fifo(i) == true)) { deliver_finish(msg); gu_trace(input_map_->erase(i)); } else { if (msg.msg().order() > O_SAFE) { gu_throw_fatal << "Message with order " << msg.msg().order() << " in input map, cannot continue safely"; } break; } } delivering_ = false; assert(input_map_->begin() == input_map_->end() || input_map_->is_safe(input_map_->begin()) == false); } void gcomm::evs::Proto::deliver_trans() { if (delivering_ == true) { gu_throw_fatal << "Recursive enter to delivery"; } delivering_ = true; if (state() != S_INSTALL && state() != S_LEAVING) gu_throw_fatal << "invalid state"; evs_log_debug(D_DELIVERY) << " aru_seq=" << input_map_->aru_seq() << " safe_seq=" << input_map_->safe_seq(); // In transitional configuration we must deliver all messages that // are fifo. 
This is because: // - We know that it is possible to deliver all fifo messages originated // from partitioned component as safe in partitioned component // - Aru in this component is at least the max known fifo seq // from partitioned component due to message recovery // - All FIFO messages originated from this component must be // delivered to fulfill self delivery requirement and // - FIFO messages originated from this component qualify as AGREED // in transitional configuration InputMap::iterator i, i_next; for (i = input_map_->begin(); i != input_map_->end(); i = i_next) { i_next = i; ++i_next; const InputMapMsg& msg(InputMapMsgIndex::value(i)); bool deliver = false; switch (msg.msg().order()) { case O_SAFE: case O_AGREED: case O_FIFO: case O_DROP: if (input_map_->is_fifo(i) == true) { deliver = true; } break; default: gu_throw_fatal; } if (deliver == true) { if (install_message_ != 0) { const MessageNode& mn( MessageNodeList::value( install_message_->node_list().find_checked( msg.msg().source()))); if (msg.msg().seq() <= mn.im_range().hs()) { deliver_finish(msg); } else { gcomm_assert(mn.operational() == false); log_info << "filtering out trans message higher than " << "install message hs " << mn.im_range().hs() << ": " << msg.msg(); } } else { deliver_finish(msg); } gu_trace(input_map_->erase(i)); } } // Sanity check: // There must not be any messages left that // - Are originated from outside of trans conf and are FIFO // - Are originated from trans conf for (i = input_map_->begin(); i != input_map_->end(); i = i_next) { i_next = i; ++i_next; const InputMapMsg& msg(InputMapMsgIndex::value(i)); NodeMap::iterator ii; gu_trace(ii = known_.find_checked(msg.msg().source())); if (NodeMap::value(ii).installed() == true) { gu_throw_fatal << "Protocol error in transitional delivery " << "(self delivery constraint)"; } else if (input_map_->is_fifo(i) == true) { gu_throw_fatal << "Protocol error in transitional delivery " << "(fifo from partitioned component)"; } 
gu_trace(input_map_->erase(i)); } delivering_ = false; } ///////////////////////////////////////////////////////////////////////////// // Message handlers ///////////////////////////////////////////////////////////////////////////// gcomm::evs::seqno_t gcomm::evs::Proto::update_im_safe_seq(const size_t uuid, const seqno_t seq) { const seqno_t im_safe_seq(input_map_->safe_seq(uuid)); if (im_safe_seq < seq) { input_map_->set_safe_seq(uuid, seq); } return im_safe_seq; } void gcomm::evs::Proto::send_request_retrans_gap(const UUID& target, const UUID& origin, const Range& range) { GapMessage gm(version_, uuid(), current_view_.id(), last_sent_, input_map_->aru_seq(), ++fifo_seq_, origin, range, Message::F_RETRANS); gu::Buffer buf; serialize(gm, buf); Datagram dg(buf); int err = send_down(dg, ProtoDownMeta(target)); if (err != 0) { log_debug << "send failed: " << strerror(err); } sent_msgs_[Message::EVS_T_GAP]++; } void gcomm::evs::Proto::request_retrans(const UUID& target, const UUID& origin, const Range& range) { NodeMap::const_iterator origin_node_i(known_.find(origin)); assert(origin_node_i != known_.end()); if (origin_node_i == known_.end()) { log_warn << "Origin " << origin << " not found from known nodes"; return; } const Node& origin_node(NodeMap::value(origin_node_i)); if (origin_node.index() == Node::invalid_index) { log_warn << "Origin " << origin << " has no index"; return; } if (not gap_rate_limit(target, range)) { evs_log_debug(D_RETRANS) << self_string() << " requesting retrans from " << target << " origin " << origin << " range " << range << " due to input map gap, aru " << input_map_->aru_seq(); std::vector gap_ranges(input_map_->gap_range_list( origin_node.index(), range)); for (std::vector::const_iterator ri(gap_ranges.begin()); ri != gap_ranges.end(); ++ri) { evs_log_debug(D_RETRANS) << "Requesting retransmssion from " << target << " origin: " << origin << " range: " << *ri; send_request_retrans_gap(target, origin, *ri); } NodeMap::iterator 
target_i(known_.find(target)); if (target_i != known_.end()) { target_i->second.last_requested_range(range); } } } // Select suitable node for recovering missing messages. The node // is chosen to be one with join message originating from the same // view and highest lowest unseen for origin. struct SelectRecoveryNodeForMissingResult { gcomm::evs::seqno_t lowest_unseen; gcomm::UUID target; SelectRecoveryNodeForMissingResult() : lowest_unseen(-1) , target() { } }; class SelectRecoveryNodeForMissing { public: SelectRecoveryNodeForMissing(const gcomm::evs::Proto& evs, const gcomm::UUID& origin, const gcomm::ViewId& view_id, SelectRecoveryNodeForMissingResult& result /* Out parameter */) : evs_(evs) , origin_(origin) , view_id_(view_id) , result_(result) { } void operator()(const gcomm::evs::NodeMap::value_type& node_v) { // Do not try to recover from self. if (evs_.uuid() == node_v.first) return; if (node_v.second.operational()) { gcomm::evs::seqno_t lu(get_lu_for(origin_, node_v.second)); if (lu > result_.lowest_unseen) { result_.lowest_unseen = lu; result_.target = node_v.first; } } } private: gcomm::evs::seqno_t get_lu_from_join_for(const gcomm::UUID& origin, const gcomm::evs::JoinMessage& jm) { gcomm::evs::MessageNodeList::const_iterator origin_i( jm.node_list().find(origin)); if (origin_i != jm.node_list().end()) { return origin_i->second.im_range().lu(); } return -1; } gcomm::evs::seqno_t get_lu_for(const gcomm::UUID& origin, const gcomm::evs::Node& node) { const gcomm::evs::JoinMessage* jm(node.join_message()); // No join message received if (not jm) return -1; // Not in the same view if (jm->source_view_id() != view_id_) return -1; return get_lu_from_join_for(origin, *jm); } const gcomm::evs::Proto& evs_; const gcomm::UUID& origin_; const gcomm::ViewId& view_id_; SelectRecoveryNodeForMissingResult& result_; // Reference to out parameter }; void gcomm::evs::Proto::request_missing() { // This method should be called only during configuration changes. 
// In operational state requests should be done based on // detected gaps and on delayed node checks. assert(state() != S_OPERATIONAL); for (NodeMap::const_iterator node_i(known_.begin()); node_i != known_.end(); ++node_i) { const UUID& origin(node_i->first); if (origin == my_uuid_) continue; // No need to request from self. const Node& node(node_i->second); // Node has no index assigned, so it was not in the current group. if (node.index() == Node::invalid_index) continue; Range range(input_map_->range(node.index())); if ((not range.is_empty() || range.hs() < last_sent_) && (node.leave_message() == 0 || node.leave_message()->seq() > range.hs())) { // Missing messages from node. If it is still considerd operational, // send a retransimission request to it. Otherwise locate some // other node to recover the missing messages. if (node.operational()) { const Range request_range(range.lu(), last_sent_); if (not request_range.is_empty()) { request_retrans(origin, origin, request_range); } } else { // Try to find suitable node to recover the missing messages // from origin. SelectRecoveryNodeForMissingResult result; std::for_each(known_.begin(), known_.end(), SelectRecoveryNodeForMissing( *this, origin, current_view_.id(), result)); // If the target node was found, it has messages up to // result.lowest_unseen - 1 from origin. 
const Range request_range(range.lu(), result.lowest_unseen - 1); if (result.target != UUID::nil() && not request_range.is_empty()) { request_retrans(result.target, origin, request_range); } else { evs_log_debug(D_RETRANS) << "Could not find a node to recover messages " << "from, missing from " << origin << " range: " << range << " last_sent: " << last_sent_; } } } } } class ResendMissingRanges { public: ResendMissingRanges(gcomm::evs::Proto& evs, gcomm::evs::seqno_t last_sent, const gcomm::ViewId& view_id) : evs_(evs) , last_sent_(last_sent) , view_id_(view_id) { } void operator()(const gcomm::evs::NodeMap::value_type& node_v) { if (node_v.first == evs_.uuid()) return; // No need to inspect self const gcomm::evs::JoinMessage* jm(node_v.second.join_message()); if (jm && jm->source_view_id() == view_id_) { resend_missing_from_join_message(*jm); } const gcomm::evs::LeaveMessage* lm(node_v.second.leave_message()); if (lm && lm->source_view_id() == view_id_) { resend_missing_from_leave_message(*lm); } } private: void resend_missing_from_join_message(const gcomm::evs::JoinMessage& jm) { gcomm::evs::MessageNodeList::const_iterator self_i( jm.node_list().find(evs_.uuid())); if (self_i == jm.node_list().end()) { log_warn << "Node join message claims to be from the same " << "view but does not list this node, " << "own uuid: " << evs_.uuid() << " join message: " << jm; return; } if (self_i->second.im_range().lu() <= last_sent_) { evs_.resend(jm.source(), gcomm::evs::Range(self_i->second.im_range().lu(), last_sent_)); } } void resend_missing_from_leave_message(const gcomm::evs::LeaveMessage& lm) { if (lm.aru_seq() < last_sent_) { evs_.resend(lm.source(), gcomm::evs::Range(lm.aru_seq() + 1, last_sent_)); } } gcomm::evs::Proto& evs_; const gcomm::evs::seqno_t last_sent_; const gcomm::ViewId& view_id_; }; void gcomm::evs::Proto::retrans_missing() { // This method should be called only during configuration changes. 
// In operational state retransmits should happen only by // responding to retrans request Gap messages. assert(state() != S_OPERATIONAL); // Iterate over join messages and retransmit is some nodes // have not received all messages. ResendMissingRanges resend_missing(*this, last_sent_, current_view_.id()); std::for_each(known_.begin(), known_.end(), resend_missing); } void gcomm::evs::Proto::handle_user_from_different_view( const Node& source_node, const UserMessage& msg) { if (state() == S_LEAVING) { // Silent drop return; } if (is_msg_from_previous_view(msg) == true) { evs_log_debug(D_FOREIGN_MSGS) << "user message " << msg << " from previous view"; return; } if (source_node.operational() == false) { evs_log_debug(D_STATE) << "dropping message from unoperational source " << msg.source(); } else if (source_node.installed() == false) { if (install_message_ != 0 && msg.source_view_id() == install_message_->install_view_id()) { assert(state() == S_GATHER || state() == S_INSTALL); evs_log_debug(D_STATE) << " recovery user message " << msg; // This is possible if install timer expires just before // new view is established on this source_node and retransmitted // install message is received just before user this message. 
if (state() == S_GATHER) { // Sanity check MessageNodeList::const_iterator self( install_message_->node_list().find(uuid())); gcomm_assert(self != install_message_->node_list().end() && MessageNodeList::value(self).operational() == true); // Mark all operational nodes in install message as // committed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_committed(true); } } shift_to(S_INSTALL); } // Other instances installed view before this one, so it is // safe to shift to S_OPERATIONAL // Mark all operational nodes in install message as installed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_installed(true); } } gu_trace(shift_to(S_OPERATIONAL)); if (pending_leave_ == true) { close(); } } } else { log_debug << self_string() << " unhandled user message " << msg; } } void gcomm::evs::Proto::handle_user(const UserMessage& msg, NodeMap::iterator ii, const Datagram& rb) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_USER_MSGS) << "received " << msg; if (msg.source_view_id() != current_view_.id()) { handle_user_from_different_view(inst, msg); // Handling user message from different view may cause shift // to operational or leaving state. Check the view ID again and if it // matches to current view proceed to handling the message. 
if (msg.source_view_id() != current_view_.id()) { return; } assert(state() == S_OPERATIONAL || state() == S_LEAVING); } if (install_message_) { // Install message has been received, which means that the // members of the group already got into agreement about the // set of delivered messages. return; } Range range; Range prev_range; seqno_t prev_aru; seqno_t prev_safe; prev_aru = input_map_->aru_seq(); prev_range = input_map_->range(inst.index()); // Insert only if msg seq is greater or equal than current lowest unseen if (msg.seq() >= prev_range.lu()) { Datagram im_dgram(rb, rb.offset()); im_dgram.normalize(); gu_trace(range = input_map_->insert(inst.index(), msg, im_dgram)); if (range.lu() > prev_range.lu()) { inst.set_tstamp(gu::datetime::Date::monotonic()); } else { evs_log_debug(D_USER_MSGS) << "Not timestamping due to user msg: range.lu: " << range.lu() << " prev_range.lu(): " << prev_range.lu(); } } else { evs_log_debug(D_USER_MSGS) << "Not timestamping due to user msg: msg.seq: " << msg.seq() << " prev_range.lu(): " << prev_range.lu(); range = prev_range; } // Update im safe seq for self update_im_safe_seq(NodeMap::value(self_i_).index(), input_map_->aru_seq()); // Update safe seq for message source prev_safe = update_im_safe_seq(inst.index(), msg.aru_seq()); // Check for missing messages if (range.hs() > range.lu() && (msg.flags() & Message::F_RETRANS) == 0) { request_retrans(msg.source(), msg.source(), range); } // Seqno range completion and acknowledgement const seqno_t max_hs(input_map_->max_hs()); if (output_.empty() == true && (state() == S_OPERATIONAL || state() == S_GATHER) && (msg.flags() & Message::F_MSG_MORE) == 0 && (last_sent_ < max_hs)) { // Message not originated from this instance, output queue is empty // and last_sent seqno should be advanced gu_trace(complete_user(max_hs)); } else if (output_.empty() == true && input_map_->aru_seq() != prev_aru) { // Output queue empty and aru changed, send gap to inform others evs_log_debug(D_GAP_MSGS) 
<< "sending empty gap"; gu_trace(send_gap(EVS_CALLER, UUID::nil(), current_view_.id(), Range())); } // Send messages if (state() == S_OPERATIONAL) { size_t n_sent(0); while (output_.empty() == false) { int err; gu_trace(err = send_user(send_window_)); if (err != 0) { if (err == EAGAIN && n_sent == 0) { // If the send window was exhausted, send a gap // message to advance aru_seq/safe_seq on peers. gu_trace(send_gap(EVS_CALLER, UUID::nil(), current_view_.id(), Range())); } break; } else { ++n_sent; } } } // Deliver messages gu_trace(deliver()); gu_trace(deliver_local()); // If in recovery state, send join each time input map aru seq reaches // last sent and either input map aru or safe seq has changed. if (state() == S_GATHER && consensus_.highest_reachable_safe_seq() == input_map_->aru_seq() && (prev_aru != input_map_->aru_seq() || prev_safe != input_map_->safe_seq()) && (msg.flags() & Message::F_RETRANS) == 0) { gcomm_assert(output_.empty() == true); if (consensus_.is_consensus() == false) { gu_trace(send_join()); } } } void gcomm::evs::Proto::handle_delegate(const DelegateMessage& msg, NodeMap::iterator ii, const Datagram& rb) { gcomm_assert(ii != known_.end()); evs_log_debug(D_DELEGATE_MSGS) << "delegate message " << msg; std::pair, size_t> umsg; gu_trace(umsg = unserialize_message(UUID::nil(), rb)); if (not umsg.first) { return; } gu_trace(handle_msg(*umsg.first, Datagram(rb, umsg.second), false)); } void gcomm::evs::Proto::handle_gap(const GapMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_GAP_MSGS) << "gap message " << msg; if ((msg.flags() & Message::F_COMMIT) != 0) { log_debug << self_string() << " commit gap from " << msg.source(); if (state() == S_GATHER && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id() && install_message_->fifo_seq() == msg.seq()) { inst.set_committed(true); 
inst.set_tstamp(gu::datetime::Date::monotonic()); if (is_all_committed() == true) { shift_to(S_INSTALL); gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range()));; } } else if (state() == S_GATHER && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id() && install_message_->fifo_seq() < msg.seq()) { // new install message has been generated shift_to(S_GATHER, true); } else { evs_log_debug(D_GAP_MSGS) << " unhandled commit gap " << msg; } return; } else if (state() == S_INSTALL && install_message_ != 0 && install_message_->install_view_id() == msg.source_view_id()) { evs_log_debug(D_STATE) << "install gap " << msg; inst.set_installed(true); inst.set_tstamp(gu::datetime::Date::monotonic()); if (is_all_installed() == true) { gu_trace(shift_to(S_OPERATIONAL)); if (pending_leave_ == true) { close(); } } return; } else if (msg.source_view_id() != current_view_.id()) { if (state() == S_LEAVING) { // Silently drop return; } if (is_msg_from_previous_view(msg) == true) { evs_log_debug(D_FOREIGN_MSGS) << "gap message from previous view"; return; } if (inst.operational() == false) { evs_log_debug(D_STATE) << "dropping message from unoperational source " << msg.source(); } else if (inst.installed() == false) { evs_log_debug(D_STATE) << "dropping message from uninstalled source " << msg.source(); } else { log_debug << "unhandled gap message " << msg; } return; } gcomm_assert(msg.source_view_id() == current_view_.id()); // seqno_t prev_safe; prev_safe = update_im_safe_seq(inst.index(), msg.aru_seq()); // Deliver messages and update tstamp only if safe_seq changed // for the source. if (prev_safe != input_map_->safe_seq(inst.index())) { inst.set_tstamp(gu::datetime::Date::monotonic()); } // if (msg.range_uuid() == uuid()) { if (msg.range().hs() > last_sent_ && (state() == S_OPERATIONAL || state() == S_GATHER)) { // This could be leaving node requesting messages up to // its last sent. 
gu_trace(complete_user(msg.range().hs())); } const seqno_t upper_bound( std::min(msg.range().hs(), last_sent_)); if (msg.range().lu() <= upper_bound) { gu_trace(resend(msg.source(), Range(msg.range().lu(), upper_bound))); } } else if ((msg.flags() & Message::F_RETRANS) != 0 && msg.source() != uuid()) { gu_trace(recover(msg.source(), msg.range_uuid(), msg.range())); } // if (state() == S_OPERATIONAL) { if (output_.empty() == false) { while (output_.empty() == false) { int err; gu_trace(err = send_user(send_window_)); if (err != 0) break; } } else { const seqno_t max_hs(input_map_->max_hs()); if (last_sent_ < max_hs) { gu_trace(complete_user(max_hs)); } } } gu_trace(deliver()); gu_trace(deliver_local()); // if (state() == S_GATHER && consensus_.highest_reachable_safe_seq() == input_map_->aru_seq() && prev_safe != input_map_->safe_seq() ) { gcomm_assert(output_.empty() == true); if (consensus_.is_consensus() == false) { gu_trace(send_join()); } } } bool gcomm::evs::Proto::update_im_safe_seqs(const MessageNodeList& node_list) { bool updated = false; // Update input map state for (MessageNodeList::const_iterator i = node_list.begin(); i != node_list.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const Node& local_node(NodeMap::value(known_.find_checked(node_uuid))); const MessageNode& node(MessageNodeList::value(i)); gcomm_assert(node.view_id() == current_view_.id()); const seqno_t safe_seq(node.safe_seq()); seqno_t prev_safe_seq; gu_trace(prev_safe_seq = update_im_safe_seq(local_node.index(), safe_seq)); if (prev_safe_seq != safe_seq && input_map_->safe_seq(local_node.index()) == safe_seq) { updated = true; } } return updated; } void gcomm::evs::Proto::retrans_leaves(const MessageNodeList& node_list) { for (NodeMap::const_iterator li = known_.begin(); li != known_.end(); ++li) { const Node& local_node(NodeMap::value(li)); if (local_node.leave_message() != 0 && local_node.is_inactive() == false) { MessageNodeList::const_iterator msg_li( 
node_list.find(NodeMap::key(li))); if (msg_li == node_list.end() || MessageNodeList::value(msg_li).leaving() == false) { const LeaveMessage& lm(*NodeMap::value(li).leave_message()); LeaveMessage send_lm(lm.version(), lm.source(), lm.source_view_id(), lm.seq(), lm.aru_seq(), lm.fifo_seq(), Message::F_RETRANS | Message::F_SOURCE); gu::Buffer buf; serialize(send_lm, buf); Datagram dg(buf); gu_trace(send_delegate(dg, UUID::nil())); } } } } class SelectSuspectsOp { public: SelectSuspectsOp(gcomm::evs::MessageNodeList& nl) : nl_(nl) { } void operator()(const gcomm::evs::MessageNodeList::value_type& vt) const { if (gcomm::evs::MessageNodeList::value(vt).suspected() == true) { nl_.insert_unique(vt); } } private: gcomm::evs::MessageNodeList& nl_; }; void gcomm::evs::Proto::check_suspects(const UUID& source, const MessageNodeList& nl) { assert(source != uuid()); MessageNodeList suspected; for_each(nl.begin(), nl.end(), SelectSuspectsOp(suspected)); for (MessageNodeList::const_iterator i(suspected.begin()); i != suspected.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const MessageNode& node(MessageNodeList::value(i)); if (node.suspected() == true) { if (node_uuid != uuid()) { size_t s_cnt(0); // Iterate over join messages to see if majority of current // view agrees with the suspicion for (NodeMap::const_iterator j(known_.begin()); j != known_.end(); ++j) { const JoinMessage* jm(NodeMap::value(j).join_message()); if (jm != 0 && jm->source() != node_uuid && current_view_.is_member(jm->source()) == true) { MessageNodeList::const_iterator mni(jm->node_list().find(node_uuid)); if (mni != jm->node_list().end()) { const MessageNode& mn(MessageNodeList::value(mni)); if (mn.suspected() == true) { ++s_cnt; } } } } const Node& kn(NodeMap::value(known_.find_checked(node_uuid))); if (kn.operational() == true && s_cnt > current_view_.members().size()/2) { evs_log_info(I_STATE) << " declaring suspected " << node_uuid << " as inactive"; set_inactive(node_uuid); } } } } } 
void gcomm::evs::Proto::cross_check_inactives(const UUID& source, const MessageNodeList& nl) { assert(source != uuid()); // Do elimination by suspect status NodeMap::const_iterator source_i(known_.find_checked(source)); for (MessageNodeList::const_iterator i(nl.begin()); i != nl.end(); ++i) { const UUID& node_uuid(MessageNodeList::key(i)); const MessageNode& node(MessageNodeList::value(i)); if (node.operational() == false) { NodeMap::iterator local_i(known_.find(node_uuid)); if (local_i != known_.end() && node_uuid != uuid()) { const Node& local_node(NodeMap::value(local_i)); if (local_node.suspected()) { // This node is suspecting and the source node has // already set inactve, mark also locally inactive. set_inactive(node_uuid); } } } } } // Asymmetry elimination: // 1a) Find all joins that has this node marked as operational and which // this node considers operational // 1b) Mark all operational nodes without join message unoperational // 2) Iterate over join messages gathered in 1a, find all // unoperational entries and mark them unoperational too void gcomm::evs::Proto::asymmetry_elimination() { // Allow some time to pass from setting install timers to get // join messages accumulated. 
const gu::datetime::Date now(gu::datetime::Date::monotonic()); TimerList::const_iterator ti( find_if(timers_.begin(), timers_.end(), TimerSelectOp(T_INSTALL))); assert(ti != timers_.end()); if (ti == timers_.end()) { log_warn << "install timer not set in asymmetry_elimination()"; return; } if (install_timeout_ - suspect_timeout_ < TimerList::key(ti) - now) { // No check yet return; } // Record initial operational state for logging std::vector oparr_before(known_.size()); size_t index(0); for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { oparr_before[index] = (NodeMap::value(i).operational() == true); index++; } std::list joins; // Compose list of join messages for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); const Node& node(NodeMap::value(i)); const JoinMessage* jm(node.join_message()); if (jm != 0) { MessageNodeList::const_iterator self_ref( jm->node_list().find(uuid())); if (node.operational() == true && self_ref != jm->node_list().end() && MessageNodeList::value(self_ref).operational() == true) { joins.push_back(NodeMap::value(i).join_message()); } } else if (node.operational() == true) { evs_log_info(I_STATE) << "marking operational node " << node_uuid << " without " << "join message inactive in asymmetry elimination"; set_inactive(node_uuid); } } // Setting node inactive may remove join message and so invalidate // pointer in joins list, so collect set of UUIDs to set inactive // and do inactivation in separate loop. 
std::set to_inactive; // Iterate over join messages and collect nodes to be set inactive for (std::list::const_iterator i(joins.begin()); i != joins.end(); ++i) { for (MessageNodeList::const_iterator j((*i)->node_list().begin()); j != (*i)->node_list().end(); ++j) { if (MessageNodeList::value(j).operational() == false) { to_inactive.insert(MessageNodeList::key(j)); } } } joins.clear(); for (std::set::const_iterator i(to_inactive.begin()); i != to_inactive.end(); ++i) { NodeMap::const_iterator ni(known_.find(*i)); if (ni != known_.end()) { if (NodeMap::value(ni).operational() == true) { evs_log_info(I_STATE) << "setting " << *i << " inactive in asymmetry elimination"; set_inactive(*i); } } else { log_warn << "node " << *i << " not found from known list in ae"; } } // Compute final state and log if it has changed std::vector oparr_after(known_.size()); index = 0; for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { oparr_after[index] = (NodeMap::value(i).operational() == true); index++; } if (oparr_before != oparr_after) { evs_log_info(I_STATE) << "before asym elimination"; if (info_mask_ & I_STATE) { std::copy(oparr_before.begin(), oparr_before.end(), std::ostream_iterator(std::cerr, " ")); std::cerr << "\n"; } evs_log_info(I_STATE) << "after asym elimination"; if (info_mask_ & I_STATE) { std::copy(oparr_after.begin(), oparr_after.end(), std::ostream_iterator(std::cerr, " ")); std::cerr << "\n"; } } } // For each node thas has no join message associated, iterate over other // known nodes' join messages to find out if the node without join message // should be declared inactive. 
void gcomm::evs::Proto::check_unseen() { for (NodeMap::iterator i(known_.begin()); i != known_.end(); ++i) { const UUID& node_uuid(NodeMap::key(i)); Node& node(NodeMap::value(i)); if (node_uuid != uuid() && current_view_.is_member(node_uuid) == false && node.join_message() == 0 && node.operational() == true) { evs_log_debug(D_STATE) << "checking operational unseen " << node_uuid; size_t cnt(0), inact_cnt(0); for (NodeMap::iterator j(known_.begin()); j != known_.end(); ++j) { const JoinMessage* jm(NodeMap::value(j).join_message()); if (jm == 0 || NodeMap::key(j) == uuid()) { continue; } MessageNodeList::const_iterator mn_i; for (mn_i = jm->node_list().begin(); mn_i != jm->node_list().end(); ++mn_i) { NodeMap::const_iterator known_i( known_.find(MessageNodeList::key(mn_i))); if (known_i == known_.end() || (MessageNodeList::value(mn_i).operational() == true && NodeMap::value(known_i).join_message() == 0)) { evs_log_debug(D_STATE) << "all joins not locally present for " << NodeMap::key(j) << " join message node list"; return; } } if ((mn_i = jm->node_list().find(node_uuid)) != jm->node_list().end()) { const MessageNode& mn(MessageNodeList::value(mn_i)); evs_log_debug(D_STATE) << "found " << node_uuid << " from " << NodeMap::key(j) << " join message: " << mn.view_id() << " " << mn.operational(); if (mn.view_id() != ViewId(V_REG)) { ++cnt; if (mn.operational() == false) ++inact_cnt; } } } if (cnt > 0 && cnt == inact_cnt) { evs_log_info(I_STATE) << "unseen node marked inactive by others (cnt=" << cnt << ", inact_cnt=" << inact_cnt << ")"; set_inactive(node_uuid); } } } } // Iterate over all join messages. If some node has nil view id and suspected // flag true in all present join messages, declare it inactive. 
void gcomm::evs::Proto::check_nil_view_id() { size_t join_counts(0); std::map nil_counts; for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const JoinMessage* jm(NodeMap::value(i).join_message()); if (jm == 0) { continue; } ++join_counts; for (MessageNodeList::const_iterator j(jm->node_list().begin()); j != jm->node_list().end(); ++j) { const MessageNode& mn(MessageNodeList::value(j)); if (mn.view_id() == ViewId(V_REG)) { // todo: investigate why removing mn.suspected() == true // condition causes some unit tests to fail if (mn.suspected() == true) { const UUID& uuid(MessageNodeList::key(j)); ++nil_counts[uuid]; } } } } for (std::map::const_iterator i(nil_counts.begin()); i != nil_counts.end(); ++i) { if (i->second == join_counts && is_inactive(i->first) == false) { log_info << "node " << i->first << " marked with nil view id and suspected in all present" << " join messages, declaring inactive"; set_inactive(i->first); } } } bool gcomm::evs::Proto::join_rate_limit() const { gu::datetime::Date now(gu::datetime::Date::monotonic()); // Limit join message sending. It is likely that // the transfer of user messages which were flushed into network // in shift to GATHER state takes some time. Too frequent join message // send will cause unwanted retransmits which will pile up in the // socket send queue. if (now < last_sent_join_tstamp_ + 100*gu::datetime::MSec) { evs_log_debug(D_JOIN_MSGS) << "join rate limit"; return true; } return false; } void gcomm::evs::Proto::handle_join(const JoinMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED); Node& inst(NodeMap::value(ii)); evs_log_debug(D_JOIN_MSGS) << "handle_join " << msg; if (state() == S_LEAVING) { if (msg.source_view_id() == current_view_.id()) { inst.set_tstamp(gu::datetime::Date::monotonic()); // Join messages are needed for detecting gaps in message // sequences on other nodes. 
inst.set_join_message(&msg); MessageNodeList same_view; for_each(msg.node_list().begin(), msg.node_list().end(), SelectNodesOp(same_view, current_view_.id(), true, true)); if (update_im_safe_seqs(same_view) == true) { gu_trace(send_leave(false)); } request_missing(); } return; } else if (is_msg_from_previous_view(msg) == true) { return; } else if (install_message_ != 0) { // Note: don't send join from this branch yet, join is // sent at the end of this method if (install_message_->source() == msg.source()) { evs_log_info(I_STATE) << "shift to gather due to representative " << msg.source() << " join"; if (msg.source_view_id() == install_message_->install_view_id()) { // Representative reached operational state, we follow // Other instances installed view before this one, so it is // safe to shift to S_OPERATIONAL // Mark all operational nodes in install message as installed for (MessageNodeList::const_iterator mi = install_message_->node_list().begin(); mi != install_message_->node_list().end(); ++mi) { if (MessageNodeList::value(mi).operational() == true) { NodeMap::iterator jj; gu_trace(jj = known_.find_checked( MessageNodeList::key(mi))); NodeMap::value(jj).set_installed(true); } } inst.set_tstamp(gu::datetime::Date::monotonic()); if (state() == S_INSTALL) { gu_trace(shift_to(S_OPERATIONAL)); if (pending_leave_ == true) { close(); return; } // proceed to process actual join message } else { log_warn << self_string() << "received join message from new " << "view while in GATHER, dropping"; return; } } gu_trace(shift_to(S_GATHER, false)); } else if (consensus_.is_consistent(*install_message_) == true) { return; // Commented out: It seems to be better strategy to // just wait source of inconsistent join to time out // instead of shifting to gather. 
#443 // if (consensus_.is_consistent(msg) == true) // { // return; // } // else // { // log_warn << "join message not consistent " << msg; // log_info << "state (stderr): "; // std::cerr << *this << std::endl; // // gu_trace(shift_to(S_GATHER, false)); // } } else { evs_log_info(I_STATE) << "shift to GATHER, install message is " << "inconsistent when handling join from " << msg.source() << " " << msg.source_view_id(); evs_log_info(I_STATE) << "state: " << *this; gu_trace(shift_to(S_GATHER, false)); } } else if (state() != S_GATHER) { evs_log_info(I_STATE) << " shift to GATHER while handling join message from " << msg.source() << " " << msg.source_view_id(); gu_trace(shift_to(S_GATHER, false)); } gcomm_assert(output_.empty() == true); // If source node is member of current view but has already // formed new view, mark it unoperational if (current_view_.is_member(msg.source()) == true && msg.source_view_id().seq() > current_view_.id().seq()) { evs_log_info(I_STATE) << " join source has already formed new view, marking inactive"; set_inactive(msg.source()); return; } // Collect view ids to gather_views_ list. // Add unseen nodes to known list and evicted nodes to evicted list. // Evicted nodes must also be added to known list for GATHER time // bookkeeping. // No need to adjust node state here, it is done later on in // check_suspects()/cross_check_inactives(). 
for (MessageNodeList::const_iterator i(msg.node_list().begin()); i != msg.node_list().end(); ++i) { NodeMap::iterator ni(known_.find(MessageNodeList::key(i))); const UUID mn_uuid(MessageNodeList::key(i)); const MessageNode& mn(MessageNodeList::value(i)); gather_views_.insert(std::make_pair(mn.view_id(), gu::datetime::Date::monotonic())); if (ni == known_.end()) { known_.insert_unique( std::make_pair(mn_uuid, Node(*this))); } // Evict nodes according to join message if (mn_uuid != uuid() && mn.evicted() == true) { set_inactive(mn_uuid); if (is_evicted(mn_uuid) == false) { evict(mn_uuid); } } } // Timestamp source if it sees processing node as operational. // Adjust local entry operational status. MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() != self) { if(MessageNodeList::value(self).operational() == true) { inst.set_tstamp(gu::datetime::Date::monotonic()); } else { evs_log_info(I_STATE) << " declaring source " << msg.source() << " as inactive (mutual exclusion)"; set_inactive(msg.source()); } } inst.set_join_message(&msg); // Select nodes that are coming from the same view as seen by // message source MessageNodeList same_view; for_each(msg.node_list().begin(), msg.node_list().end(), SelectNodesOp(same_view, current_view_.id(), true, true)); // Find out self from node list MessageNodeList::const_iterator nlself_i(same_view.find(uuid())); // Other node coming from the same view if (msg.source() != uuid() && msg.source_view_id() == current_view_.id()) { gcomm_assert(nlself_i != same_view.end()); // Update input map state (void)update_im_safe_seqs(same_view); // Find out max hs and complete up to that if needed MessageNodeList::const_iterator max_hs_i( max_element(same_view.begin(), same_view.end(), RangeHsCmp())); const seqno_t max_hs(MessageNodeList::value(max_hs_i).im_range().hs()); if (last_sent_ < max_hs) { gu_trace(complete_user(max_hs)); } } // Request missing messages from other nodes. 
request_missing(); // Retrans leave messages that others are missing gu_trace(retrans_leaves(same_view)); // Make cross check to resolve conflict if two nodes // declare each other inactive. There is no need to make // this for own messages. if (msg.source() != uuid()) { gu_trace(check_suspects(msg.source(), same_view)); gu_trace(cross_check_inactives(msg.source(), same_view)); gu_trace(check_unseen()); gu_trace(check_nil_view_id()); } // Eliminate asymmetry according to operational status flags in // join messages gu_trace(asymmetry_elimination()); // If current join message differs from current state, send new join const JoinMessage* curr_join(NodeMap::value(self_i_).join_message()); MessageNodeList new_nl; populate_node_list(&new_nl); if (curr_join == 0 || (curr_join->aru_seq() != input_map_->aru_seq() || curr_join->seq() != input_map_->safe_seq() || curr_join->node_list() != new_nl)) { gu_trace(create_join()); if (consensus_.is_consensus() == false && not join_rate_limit()) { send_join(false); } } if (consensus_.is_consensus() == true) { if (is_representative(uuid()) == true) { gu_trace(send_install(EVS_CALLER)); } } } void gcomm::evs::Proto::handle_leave(const LeaveMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& node(NodeMap::value(ii)); evs_log_debug(D_LEAVE_MSGS) << "handle_leave " << msg; // Leave messages must be always handled. They carry aru_seq information // which is used to retrasmit missing messages. node.set_leave_message(&msg); if (msg.source() == uuid()) { // The last one to live, instant close. Otherwise continue // serving until it becomes apparent that others have // leave message. 
if (current_view_.members().size() == 1) { gu_trace(shift_to(S_CLOSED)); } } else { // Always set node nonoperational if leave message is seen node.set_operational(false); if (msg.source_view_id() != current_view_.id() || is_msg_from_previous_view(msg) == true) { // Silent drop return; } const seqno_t prev_safe_seq(update_im_safe_seq(node.index(), msg.aru_seq())); if (prev_safe_seq != input_map_->safe_seq(node.index())) { node.set_tstamp(gu::datetime::Date::monotonic()); } if (state() == S_OPERATIONAL) { evs_log_info(I_STATE) << " shift to GATHER when handling leave from " << msg.source() << " " << msg.source_view_id(); gu_trace(shift_to(S_GATHER, true)); } else if (state() == S_GATHER && prev_safe_seq != input_map_->safe_seq(node.index())) { gu_trace(send_join()); } } } void gcomm::evs::Proto::handle_install(const InstallMessage& msg, NodeMap::iterator ii) { assert(ii != known_.end()); assert(state() != S_CLOSED && state() != S_JOINING); Node& inst(NodeMap::value(ii)); evs_log_debug(D_INSTALL_MSGS) << "install msg " << msg; if (state() == S_LEAVING) { // Check if others have receievd leave message or declared // as unoperational before shifting to closed. MessageNodeList::const_iterator mn_i(msg.node_list().find(uuid())); if (mn_i != msg.node_list().end()) { const MessageNode& mn(MessageNodeList::value(mn_i)); if (mn.operational() == false || mn.leaving() == true) { gu_trace(shift_to(S_CLOSED)); } } return; } else if (state() == S_OPERATIONAL) { // Drop install messages in operational state. evs_log_debug(D_INSTALL_MSGS) << "dropping install message in already installed view"; return; } else if (inst.operational() == false) { // Message source is not seen as operational, must not accept // anything from it. 
evs_log_debug(D_INSTALL_MSGS) << "install message source " << msg.source() << " is not operational, discarding message"; return; } else if (is_msg_from_previous_view(msg) == true) { // Delayed install message evs_log_debug(D_FOREIGN_MSGS) << " dropping install message from previous view"; return; } else if (install_message_ != 0) { if (msg.source() == install_message_->source() && msg.install_view_id().seq() > install_message_->install_view_id().seq()) { // Representative regenerated install message evs_log_debug(D_INSTALL_MSGS) << "regenerated install message"; setall_committed(false); setall_installed(false); delete install_message_; install_message_ = 0; // Fall through to process new install message } else if (msg.source() == install_message_->source()) { // Duplicate or delayed install message evs_log_debug(D_INSTALL_MSGS) << "duplicate or delayed install message"; return; } else { MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() == self || MessageNodeList::value(self).operational() == false) { evs_log_debug(D_INSTALL_MSGS) << "dropping install message, processing node not in " << "new view"; } else { // Two nodes decided to generate install message simultaneously, // shift to gather to combine groups in install messages. log_warn << self_string() << " shift to GATHER due to conflicting install " << "messages"; gu_trace(shift_to(S_GATHER)); } return; } } else if (inst.installed() == true) { log_warn << self_string() << " shift to GATHER due to inconsistent state"; gu_trace(shift_to(S_GATHER)); return; } // Construct join from install message so that the most recent // information from representative is updated to local state. 
if (msg.source() != uuid()) { const MessageNode& mn( MessageNodeList::value( msg.node_list().find_checked(msg.source()))); JoinMessage jm(msg.version(), msg.source(), mn.view_id(), msg.seq(), msg.aru_seq(), msg.fifo_seq(), msg.node_list()); handle_join(jm, ii); } // Drop install message if processing node won't be part of the // view to be installed. // Don't set nodes that are forming another view inactive here, // they should enter new view shortly after install message // delivery and should be ready to restart GATHER round. MessageNodeList::const_iterator self(msg.node_list().find(uuid())); if (msg.node_list().end() == self || MessageNodeList::value(self).operational() == false) { evs_log_debug(D_INSTALL_MSGS) << "dropping install message, processing node not in new view"; return; } // Proceed to install phase assert(install_message_ == 0); // Run through known nodes and remove each entry that is // not member of current view or present in install message. // This is to prevent inconsistent view of group when first message(s) // from new node are received after install message on representative // and before install message on other nodes. bool changed(false); NodeMap::iterator i, i_next; for (NodeMap::iterator i(known_.begin()); i != known_.end(); i = i_next) { i_next = i, ++i_next; const UUID& uuid(NodeMap::key(i)); if (msg.node_list().find(uuid) == msg.node_list().end() && current_view_.members().find(uuid) == current_view_.members().end()) { log_info << self_string() << " temporarily discarding known " << uuid << " due to received install message"; known_.erase(i); changed = true; } } // Recreate join message to match current state, otherwise is_consistent() // below will fail. if (changed == true) { (void)create_join(); } // See if install message is consistent with local state. // Is_consistent() checks only local state and local join // message in case other nodes have already been seen and reported // nodes that will not be in the next view. 
if (consensus_.is_consistent(msg) == true) { inst.set_tstamp(gu::datetime::Date::monotonic()); install_message_ = new InstallMessage(msg); assert(install_message_->source() != UUID::nil()); assert(install_message_->flags() != 0); // Send commit gap gu_trace(send_gap(EVS_CALLER, UUID::nil(), install_message_->install_view_id(), Range(), true)); } else { evs_log_debug(D_INSTALL_MSGS) << "install message " << msg << " not consistent with state " << *this; gu_trace(shift_to(S_GATHER, true)); } } void gcomm::evs::Proto::handle_delayed_list(const DelayedListMessage& msg, NodeMap::iterator ii) { if (auto_evict_ == 0) { // Ignore evict list messages if auto_evict_ is disabled. return; } Node& node(NodeMap::value(ii)); node.set_delayed_list_message(&msg); gu::datetime::Date now(gu::datetime::Date::monotonic()); // Construct a list of evict candidates that appear in evict list messages // with cnt greater than local auto_evict_. If evict candidate is reported // by majority of the current group, evict process is triggered. // UUID -> over auto_evict_, total count typedef std::map > Evicts; Evicts evicts; bool found(false); for (NodeMap::const_iterator i(known_.begin()); i != known_.end(); ++i) { const DelayedListMessage* const dlm( NodeMap::value(i).delayed_list_message()); if (dlm == 0) { continue; } else if (dlm->delayed_list().find(uuid()) != dlm->delayed_list().end()) { evs_log_debug(D_STATE) << "found self " << uuid() << " from evict list from " << msg.source() << " at " << get_address(msg.source()); continue; } else if (dlm->tstamp() + delayed_keep_period_ < now) { evs_log_debug(D_STATE) << "ignoring expired evict message"; continue; } for (DelayedListMessage::DelayedList::const_iterator dlm_i(dlm->delayed_list().begin()); dlm_i != dlm->delayed_list().end(); ++dlm_i) { if (dlm_i->second <= 1) { // Don't consider entries with single delayed event as // evict candidates. 
continue; } std::pair eir( evicts.insert( std::make_pair( dlm_i->first, std::make_pair(0, 0)))); evs_log_debug(D_STATE) << "eir " << eir.first->first << " " << eir.first->second.first << " " << eir.first->second.second; ++eir.first->second.second; // total count if (dlm_i->second >= auto_evict_) { ++eir.first->second.first; // over threshold count found = true; } } } // Evict candidates that have reached threshold count for (Evicts::const_iterator i(evicts.begin()); found == true && i != evicts.end(); ++i) { if (is_evicted(i->first) == true) { // Already evicted, avoid spamming continue; } evs_log_info(I_STATE) << "evict candidate " << i->first << " " << i->second.first << " " << i->second.second; // If the candidate is in the current view, require majority // of the view to agree. If the candidate is not in the current // view, require majority of known nodes to agree. Ability to // evict nodes outside of the group (even while in non-PC) is // needed to stabilize cluster also in the case that nodes // have already partitioned. // TODO: Record stable views from PC and use weights from there // accordingly (need to be added to view). 
if (i->second.first != 0 && ((current_view_.is_member(i->first) && i->second.second > current_view_.members().size()/2) || i->second.second > known_.size()/2)) { log_warn << "evicting member " << i->first << " at " << get_address(i->first) << " permanently from group"; evict(i->first); if (state() == S_OPERATIONAL) { shift_to(S_GATHER, true); } } } } galera-4-26.4.25/gcomm/src/defaults.hpp000644 000164 177776 00000020545 15107057155 020733 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ #ifndef GCOMM_DEFAULTS_HPP #define GCOMM_DEFAULTS_HPP #include #include "gu_config.hpp" namespace gcomm { struct Defaults { static std::string const ProtonetBackend ; static std::string const ProtonetVersion ; static std::string const SocketChecksum ; static std::string const SocketRecvBufSize ; static std::string const SocketSendBufSize ; static std::string const GMCastVersion ; static std::string const GMCastTcpPort ; static std::string const GMCastMCastTTL ; static std::string const GMCastSegment ; static std::string const GMCastTimeWait ; static std::string const GMCastPeerTimeout ; static std::string const EvsViewForgetTimeout ; static std::string const EvsViewForgetTimeoutMin ; static std::string const EvsInactiveCheckPeriod ; static std::string const EvsSuspectTimeout ; static std::string const EvsSuspectTimeoutMin ; static std::string const EvsInactiveTimeout ; static std::string const EvsInactiveTimeoutMin ; static std::string const EvsKeepalivePeriod ; static std::string const EvsKeepalivePeriodMin ; static std::string const EvsCausalKeepalivePeriod ; static std::string const EvsJoinRetransPeriod ; static std::string const EvsJoinRetransPeriodMin ; static std::string const EvsStatsReportPeriod ; static std::string const EvsStatsReportPeriodMin ; static std::string const EvsDebugLogMask ; static std::string const EvsInfoLogMask ; static std::string const EvsSendWindow ; static std::string const EvsSendWindowMin ; static std::string const 
EvsUserSendWindow ; static std::string const EvsUserSendWindowMin ; static std::string const EvsMaxInstallTimeouts ; static std::string const EvsDelayMargin ; static std::string const EvsDelayedKeepPeriod ; static std::string const EvsAutoEvict ; static std::string const EvsVersion ; static std::string const EvsUseAggregate ; static std::string const PcAnnounceTimeout ; static std::string const PcChecksum ; static std::string const PcIgnoreQuorum ; static std::string const PcIgnoreSb ; static std::string const PcNpvo ; static std::string const PcVersion ; static std::string const PcWaitPrim ; static std::string const PcWaitPrimTimeout ; static std::string const PcWeight ; static std::string const PcRecovery ; static std::string const PcLinger ; }; struct Flags { static const int BaseHost = gu::Config::Flag::read_only; static const int BasePort = gu::Config::Flag::read_only | gu::Config::Flag::type_integer; static const int ProtonetBackend = gu::Config::Flag::read_only | gu::Config::Flag::deprecated; static const int ProtonetVersion = gu::Config::Flag::read_only | gu::Config::Flag::deprecated; // Hidden because not documented / does not seem to be used? 
static const int TcpNonBlocking = gu::Config::Flag::hidden; static const int SocketChecksum = gu::Config::Flag::read_only | gu::Config::Flag::type_integer; static const int SocketRecvBufSize = 0; static const int SocketSendBufSize = 0; static const int GMCastVersion = gu::Config::Flag::read_only; static const int GMCastGroup = gu::Config::Flag::read_only; static const int GMCastListenAddr = gu::Config::Flag::read_only; static const int GMCastMCastAddr = gu::Config::Flag::read_only; // Hidden because undocumented static const int GMCastMCastPort = gu::Config::Flag::hidden | gu::Config::Flag::read_only | gu::Config::Flag::type_integer; static const int GMCastMCastTTL = gu::Config::Flag::read_only | gu::Config::Flag::type_integer; static const int GMCastTimeWait = gu::Config::Flag::read_only | gu::Config::Flag::type_duration; static const int GMCastPeerTimeout = gu::Config::Flag::read_only | gu::Config::Flag::type_duration;; // Hidden because undocumented static const int GMCastMaxInitialReconnectAttempts = gu::Config::Flag::hidden | gu::Config::Flag::type_integer; static const int GMCastPeerAddr = 0; // Hidden because undocumented, potentially dangerous static const int GMCastIsolate = gu::Config::Flag::hidden | gu::Config::Flag::type_integer; static const int GMCastSegment = gu::Config::Flag::read_only | gu::Config::Flag::type_integer; static const int EvsVersion = gu::Config::Flag::read_only; static const int EvsViewForgetTimeout = gu::Config::Flag::read_only | gu::Config::Flag::type_duration; static const int EvsSuspectTimeout = gu::Config::Flag::type_duration; static const int EvsInactiveTimeout = gu::Config::Flag::type_duration; static const int EvsInactiveCheckPeriod = gu::Config::Flag::type_duration; static const int EvsInstallTimeout = gu::Config::Flag::type_duration; static const int EvsKeepalivePeriod = gu::Config::Flag::type_duration; static const int EvsJoinRetransPeriod = gu::Config::Flag::type_duration; static const int EvsStatsReportPeriod = 
gu::Config::Flag::type_duration; static const int EvsDebugLogMask = 0; static const int EvsInfoLogMask = 0; static const int EvsSendWindow = gu::Config::Flag::type_integer; static const int EvsUserSendWindow = gu::Config::Flag::type_integer; static const int EvsUseAggregate = gu::Config::Flag::type_bool; static const int EvsCausalKeepalivePeriod = gu::Config::Flag::type_duration; static const int EvsMaxInstallTimeouts = gu::Config::Flag::type_integer; static const int EvsDelayMargin = gu::Config::Flag::type_duration; static const int EvsDelayedKeepPeriod = gu::Config::Flag::type_duration; static const int EvsEvict = 0; static const int EvsAutoEvict = gu::Config::Flag::read_only | gu::Config::Flag::type_bool; static const int PcVersion = gu::Config::Flag::read_only; static const int PcIgnoreSb = gu::Config::Flag::type_bool; static const int PcIgnoreQuorum = gu::Config::Flag::type_bool; static const int PcChecksum = gu::Config::Flag::type_bool; static const int PcAnnounceTimeout = gu::Config::Flag::read_only | gu::Config::Flag::type_duration; static const int PcLinger = gu::Config::Flag::read_only | gu::Config::Flag::type_duration; static const int PcNpvo = gu::Config::Flag::type_bool; static const int PcBootstrap = gu::Config::Flag::type_bool; static const int PcWaitPrim = gu::Config::Flag::read_only | gu::Config::Flag::type_bool; static const int PcWaitPrimTimeout = gu::Config::Flag::read_only | gu::Config::Flag::type_duration; static const int PcWeight = gu::Config::Flag::type_integer; static const int PcRecovery = gu::Config::Flag::read_only | gu::Config::Flag::type_bool; }; } #endif // GCOMM_DEFAULTS_HPP galera-4-26.4.25/gcomm/CMakeLists.txt000644 000164 177776 00000000152 15107057155 020354 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_subdirectory(src) add_subdirectory(test) galera-4-26.4.25/gcomm/doc/000755 000164 177776 00000000000 15107057160 016357 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcomm/doc/Doxyfile000644 
000164 177776 00000143705 15107057155 020103 0ustar00jenkinsnogroup000000 000000 # Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GComm # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. 
Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. 
This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. 
If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. 
# The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. 
WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src ../src/gcomm # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h *.hpp # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. 
INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. 
DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. 
COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. 
LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. 
XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. 
PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. 
PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. 
ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. 
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. 
SEARCHENGINE = NO galera-4-26.4.25/gcomm/test/000755 000164 177776 00000000000 15107057160 016571 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcomm/test/check_util.cpp000644 000164 177776 00000013125 15107057155 021415 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "gcomm/util.hpp" #include "gcomm/protonet.hpp" #include "gcomm/datagram.hpp" #include "gcomm/conf.hpp" #include "gcomm/uuid.hpp" #include "check_gcomm.hpp" #include "gu_logger.hpp" #include #include #include #include #include using std::vector; using std::numeric_limits; using std::string; using namespace gcomm; using gu::Exception; using gu::byte_t; using gu::Buffer; START_TEST(test_datagram) { // Header check gcomm::NetHeader hdr(42, 0); ck_assert(hdr.len() == 42); ck_assert(hdr.has_crc32() == false); ck_assert(hdr.version() == 0); hdr.set_crc32(1234, NetHeader::CS_CRC32); ck_assert(hdr.has_crc32() == true); ck_assert(hdr.len() == 42); gcomm::NetHeader hdr1(42, 1); ck_assert(hdr1.len() == 42); ck_assert(hdr1.has_crc32() == false); ck_assert(hdr1.version() == 1); gu::byte_t hdrbuf[NetHeader::serial_size_]; ck_assert(serialize(hdr1, hdrbuf, sizeof(hdrbuf), 0) == NetHeader::serial_size_); try { unserialize(hdrbuf, sizeof(hdrbuf), 0, hdr); ck_abort(); } catch (Exception& e) { // ok } gu::byte_t b[128]; for (gu::byte_t i = 0; i < sizeof(b); ++i) { b[i] = i; } gu::Buffer buf(b, b + sizeof(b)); gcomm::Datagram dg(buf); ck_assert(dg.len() == sizeof(b)); // Normal copy construction gcomm::Datagram dgcopy(buf); ck_assert(dgcopy.len() == sizeof(b)); ck_assert(memcmp(dgcopy.header() + dgcopy.header_offset(), dg.header() + dg.header_offset(), dg.header_len()) == 0); ck_assert(dgcopy.payload() == dg.payload()); // Copy construction from offset of 16 gcomm::Datagram dg16(dg, 16); log_info << dg16.len(); ck_assert(dg16.len() - dg16.offset() == sizeof(b) - 16); for (gu::byte_t i = 0; i < sizeof(b) - 16; ++i) { ck_assert(dg16.payload()[i + dg16.offset()] == i + 16); } 
#if 0 // Normalize datagram, all data is moved into payload, data from // beginning to offset is discarded. Normalization must not change // dg dg16.normalize(); ck_assert(dg16.len() == sizeof(b) - 16); for (byte_t i = 0; i < sizeof(b) - 16; ++i) { ck_assert(dg16.payload()[i] == i + 16); } ck_assert(dg.len() == sizeof(b)); for (byte_t i = 0; i < sizeof(b); ++i) { ck_assert(dg.payload()[i] == i); } Datagram dgoff(buf, 16); dgoff.header().resize(8); dgoff.set_header_offset(4); ck_assert(dgoff.len() == buf.size() + 4); ck_assert(dgoff.header_offset() == 4); ck_assert(dgoff.header().size() == 8); for (byte_t i = 0; i < 4; ++i) { *(&dgoff.header()[0] + i) = i; } dgoff.normalize(); ck_assert(dgoff.len() == sizeof(b) - 16 + 4); ck_assert(dgoff.header_offset() == 0); ck_assert(dgoff.header().size() == 0); #endif // 0 } END_TEST START_TEST(test_view_state) { // compare view. UUID view_uuid(NULL, 0); ViewId view_id(V_TRANS, view_uuid, 789); UUID m1(NULL, 0); UUID m2(NULL, 0); View view(0, view_id, true); view.add_member(m1, 0); view.add_member(m2, 1); View view2; { std::ostringstream os; view.write_stream(os); std::istringstream is(os.str()); view2.read_stream(is); ck_assert(view == view2); } // Create configuration to set file name. gu::Config conf; // compare view state. UUID my_uuid(NULL, 0); ViewState vst(my_uuid, view, conf); UUID my_uuid_2; View view_2; ViewState vst2(my_uuid_2, view_2, conf); { std::ostringstream os; vst.write_stream(os); std::istringstream is(os.str()); vst2.read_stream(is); ck_assert(vst == vst2); } // test write file and read file. vst.write_file(); UUID my_uuid_3; View view_3; ViewState vst3(my_uuid_3, view_3, conf); vst3.read_file(); ck_assert(vst == vst3); ViewState::remove_file(conf); } END_TEST /* With -D_GLIBCXX_DEBUG, std::set_intersection() asserts if the * ranges are not irreflexive. 
*/ START_TEST(test_set_intersection_irreflexive_assertion) { View view1; View view2; UUID uuid(NULL, 0); view1.add_joined(uuid, 0); view2.add_joined(uuid, 0); ck_assert(view1.joined().begin() != view1.joined().end()); const auto joined_begin11 = *view1.joined().begin(); const auto joined_begin12 = *view1.joined().begin(); ck_assert(!(joined_begin11 < joined_begin12)); ck_assert(!(*view1.joined().begin() < *view1.joined().begin())); ck_assert(view2.joined().begin() != view2.joined().end()); ck_assert(!(*view2.joined().begin() < *view2.joined().begin())); std::map intersection; std::set_intersection(view1.joined().begin(), view1.joined().end(), view2.joined().begin(), view2.joined().end(), std::inserter(intersection, intersection.begin())); ck_assert(intersection.size() == 1); ck_assert(intersection.begin()->first == uuid); } END_TEST Suite* util_suite() { Suite* s = suite_create("util"); TCase* tc; tc = tcase_create("test_datagram"); tcase_add_test(tc, test_datagram); suite_add_tcase(s, tc); tc = tcase_create("test_view_state"); tcase_add_test(tc, test_view_state); suite_add_tcase(s, tc); tc = tcase_create("test_set_intersection_irreflexive_assertion"); tcase_add_test(tc, test_set_intersection_irreflexive_assertion); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/CMakeLists.txt000644 000164 177776 00000002153 15107057155 021336 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_executable(check_gcomm check_fair_send_queue.cpp check_gcomm.cpp check_gmcast.cpp check_trace.cpp check_types.cpp check_util.cpp check_evs2.cpp check_pc.cpp ) target_link_libraries(check_gcomm gcomm ${GALERA_UNIT_TEST_LIBS} ) # TODO: Fix target_compile_options(check_gcomm PRIVATE -Wno-conversion -Wno-unused-parameter -Wno-overloaded-virtual ) add_test( NAME check_gcomm COMMAND check_gcomm WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/Testing ) # # Nondeterministic unit tests, must be run manually. 
# add_executable(check_gcomm_nondet check_gcomm_nondet.cpp check_gmcast_nondet.cpp check_pc_nondet.cpp check_util_nondet.cpp) target_compile_options(check_gcomm_nondet PRIVATE -Wno-conversion -Wno-overloaded-virtual -Wno-unused-parameter ) target_link_libraries(check_gcomm_nondet gcomm ${GALERA_UNIT_TEST_LIBS}) # # Old SSL test, must be run manually. # add_executable(ssl_test ssl_test.cpp) target_compile_options(ssl_test PRIVATE -Wno-unused-parameter) target_link_libraries(ssl_test gcomm) galera-4-26.4.25/gcomm/test/check_trace.cpp000644 000164 177776 00000034234 15107057155 021542 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2014 Codership Oy * * $Id$ */ /*! * @brief Check trace implementation */ #include "check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() #include // std::cerr using namespace std; using namespace gu; using namespace gcomm; struct CheckTraceConfInit { explicit CheckTraceConfInit(gu::Config& conf) { gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); } }; extern "C" void check_trace_log_cb(int severity, const char* msg) { std::cerr << gu::datetime::Date::monotonic() << ": " << msg << "\n"; } // This is to avoid static initialization fiasco with gcomm::Conf static members // Ideally it is the latter which should be wrapped in a function, but, unless // this is used to initialize another static object, it should be fine. gu::Config& check_trace_conf() { static gu::Config conf; static CheckTraceConfInit const check_trace_conf_init(conf); return conf; } std::unique_ptr DummyTransport::net_; Protonet& DummyTransport::get_net() { // Unit tests are single threaded, no need to worry about thread // synchronization here. 
if (not net_) net_ = std::unique_ptr(Protonet::create(check_trace_conf()));; return *net_; } ostream& gcomm::operator<<(ostream& os, const TraceMsg& msg) { return (os << "(" << msg.source() << "," << msg.source_view_id() << "," << msg.seq() << ")"); } ostream& gcomm::operator<<(ostream& os, const ViewTrace& vtr) { os << vtr.view() << ": "; copy(vtr.msgs().begin(), vtr.msgs().end(), ostream_iterator(os, " ")); return os; } ostream& gcomm::operator<<(ostream& os, const Trace& tr) { os << "trace: \n"; os << tr.view_traces(); return os; } ostream& gcomm::operator<<(ostream& os, const Channel& ch) { return (os << "(" << ch.latency() << "," << ch.loss() << ")"); } ostream& gcomm::operator<<(ostream& os, const Channel* chp) { return (os << *chp); } ostream& gcomm::operator<<(ostream& os, const MatrixElem& me) { return (os << "(" << me.ii() << "," << me.jj() << ")"); } ostream& gcomm::operator<<(ostream& os, const PropagationMatrix& prop) { os << "("; copy(prop.prop_.begin(), prop.prop_.end(), ostream_iterator(os, ",")); os << ")"; return os; } class LinkOp { public: LinkOp(DummyNode& node, ChannelMap& prop) : node_(node), prop_(prop) { } void operator()(NodeMap::value_type& l) { if (NodeMap::key(l) != node_.index()) { ChannelMap::iterator ii; gu_trace(ii = prop_.insert_unique( make_pair(MatrixElem(node_.index(), NodeMap::key(l)), new Channel(check_trace_conf())))); gcomm::connect(ChannelMap::value(ii), node_.protos().front()); gu_trace(ii = prop_.insert_unique( make_pair(MatrixElem(NodeMap::key(l), node_.index()), new Channel(check_trace_conf())))); gcomm::connect(ChannelMap::value(ii), NodeMap::value(l)->protos().front()); } } private: DummyNode& node_; ChannelMap& prop_; }; class PropagateOp { public: PropagateOp(NodeMap& tp) : tp_(tp) { } void operator()(ChannelMap::value_type& vt) { ChannelMsg cmsg(vt.second->get()); if (cmsg.rb().len() != 0) { NodeMap::iterator i(tp_.find(vt.first.jj())); gcomm_assert(i != tp_.end()); 
gu_trace(NodeMap::value(i)->protos().front()->handle_up( &tp_, cmsg.rb(), ProtoUpMeta(cmsg.source()))); } } private: NodeMap& tp_; }; class ExpireTimersOp { public: ExpireTimersOp() { } void operator()(NodeMap::value_type& vt) { NodeMap::value(vt)->handle_timers(); } }; void gcomm::Channel::put(const Datagram& rb, const UUID& source) { Datagram dg(rb); // if (dg.is_normalized() == false) // { // dg.normalize(); // } queue_.push_back(make_pair(latency_, ChannelMsg(dg, source))); } ChannelMsg gcomm::Channel::get() { while (queue_.empty() == false) { pair& p(queue_.front()); if (p.first == 0) { // todo: packet loss goes here if (loss() < 1.) { double rnd(double(rand())/double(RAND_MAX)); if (loss() < rnd) { queue_.pop_front(); return ChannelMsg(Datagram(), UUID::nil()); } } ChannelMsg ret(p.second); queue_.pop_front(); return ret; } else { --p.first; return ChannelMsg(Datagram(), UUID::nil()); } } return ChannelMsg(Datagram(), UUID::nil()); } gcomm::PropagationMatrix::~PropagationMatrix() { for_each(prop_.begin(), prop_.end(), ChannelMap::DeleteObject()); } void gcomm::PropagationMatrix::insert_tp(DummyNode* t) { gu_trace(tp_.insert_unique(make_pair(t->index(), t))); for_each(tp_.begin(), tp_.end(), LinkOp(*t, prop_)); } void gcomm::PropagationMatrix::set_latency(const size_t ii, const size_t jj, const size_t lat) { ChannelMap::iterator i; gu_trace(i = prop_.find_checked(MatrixElem(ii, jj))); ChannelMap::value(i)->set_latency(lat); } void gcomm::PropagationMatrix::set_loss(const size_t ii, const size_t jj, const double loss) { ChannelMap::iterator i; gu_trace(i = prop_.find_checked(MatrixElem(ii, jj))); ChannelMap::value(i)->set_loss(loss); } void gcomm::PropagationMatrix::split(const size_t ii, const size_t jj) { set_loss(ii, jj, 0.); set_loss(jj, ii, 0.); } void gcomm::PropagationMatrix::merge(const size_t ii, const size_t jj, const double loss) { set_loss(ii, jj, loss); set_loss(jj, ii, loss); } void gcomm::PropagationMatrix::expire_timers() { for_each(tp_.begin(), 
tp_.end(), ExpireTimersOp()); } void gcomm::PropagationMatrix::propagate_n(size_t n) { while (n-- > 0) { for_each(prop_.begin(), prop_.end(), PropagateOp(tp_)); } } void gcomm::PropagationMatrix::propagate_until_empty() { do { for_each(prop_.begin(), prop_.end(), PropagateOp(tp_)); } while (count_channel_msgs() > 0); } void gcomm::PropagationMatrix::propagate_until_cvi(bool handle_timers) { bool all_in = false; do { propagate_n(10); all_in = all_in_cvi(); if (all_in == false && handle_timers == true) { expire_timers(); } if (handle_timers) { // Assume that time progresses in 50 millisecond intervals // and that is fine enough granularity for all tests // which deal with timers. gu::datetime::SimClock::inc_time(50*gu::datetime::MSec); } } while (all_in == false); } size_t gcomm::PropagationMatrix::count_channel_msgs() const { size_t ret = 0; for (ChannelMap::const_iterator i = prop_.begin(); i != prop_.end(); ++i) { ret += ChannelMap::value(i)->n_msgs(); } return ret; } bool gcomm::PropagationMatrix::all_in_cvi() const { for (std::map::const_iterator i = tp_.begin(); i != tp_.end(); ++i) { if (i->second->in_cvi() == false) { return false; } } return true; } static void check_traces(const Trace& t1, const Trace& t2) { for (Trace::ViewTraceMap::const_iterator i = t1.view_traces().begin(); i != t1.view_traces().end(); ++i) { Trace::ViewTraceMap::const_iterator j = t2.view_traces().find(Trace::ViewTraceMap::key(i)); if (j == t2.view_traces().end()) continue; ViewType type = i->first.type(); // @todo Proper checks for PRIM and NON_PRIM if (type == V_TRANS || type == V_REG) { Trace::ViewTraceMap::const_iterator i_next(i); ++i_next; Trace::ViewTraceMap::const_iterator j_next(j); ++j_next; if (type == V_TRANS) { // if next reg view is same, then views and msgs are the same. 
if (i_next != t1.view_traces().end() && j_next != t2.view_traces().end() && i_next->first == j_next->first) { gcomm_assert(*i == *j) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "next views: \n\n" << *i_next << "\n\n" << *j_next; } } if (type == V_REG) { // members are same all the times. gcomm_assert(i->second.view().members() == j->second.view().members()) << "trace differ: \n\n" << *i << "\n\n" << *j; // if next trans view has same members, then msgs are the same. if (i_next != t1.view_traces().end() && j_next != t2.view_traces().end()) { if (i_next->second.view().members() == j_next->second.view().members()) { gcomm_assert(i->second.msgs() == j->second.msgs()) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "next views: \n\n" << *i_next << "\n\n" << *j_next; } else { // Next trans view may or may not contain same members. // E.g. if paritioning happens during group reconfiguration, // one of the partitions may complete the group reconfiguration // with partitioned members, but the other partition will run // the reconfiguration protocol again, ending with different // but overlapping members. // // Also, joining members may be different. // // We skip the check for now. } } if (i == t1.view_traces().begin() || j == t2.view_traces().begin()) continue; Trace::ViewTraceMap::const_iterator i_prev(i); --i_prev; Trace::ViewTraceMap::const_iterator j_prev(j); --j_prev; if (i_prev->first == j_prev->first) { // if previous trans view id is the same. // the reg view should be the same. gcomm_assert(i->second.view() == j->second.view()) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "previous views: \n\n" << *i_prev << "\n\n" << *j_prev; } else { // Previous trans view id is not same: // Union of joined sets should match members. // Note that intersction of joined sets may be non-empty, // e.g. in case of (1),(2),(3) -> (1, 2, 3) // the view event for both 1 and 2 will have 3 in joined set. 
NodeList joined_union; size_t left_size = 0, part_size = 0; std::set_union(i->second.view().joined().begin(), i->second.view().joined().end(), j->second.view().joined().begin(), j->second.view().joined().end(), std::inserter(joined_union, joined_union.begin())); gcomm_assert(i->second.view().members() == joined_union) << "union of joined sets does not match members: \n\n" << i->second.view().members() << "\n\n" << joined_union; // intersections of left, partitioned sets are empty. NodeList output; std::set_intersection(i->second.view().left().begin(), i->second.view().left().end(), j->second.view().left().begin(), j->second.view().left().end(), std::inserter(output, output.begin())); left_size = output.size(); output.clear(); std::set_intersection(i->second.view().partitioned().begin(), i->second.view().partitioned().end(), j->second.view().partitioned().begin(), j->second.view().partitioned().end(), std::inserter(output, output.begin())); part_size = output.size(); output.clear(); gcomm_assert( /* members are same */ i->second.view().members() == j->second.view().members() && left_size == 0 && part_size == 0) << "trace differ: \n\n" << *i << "\n\n" << *j << "\n\n" "previous views: \n\n" << *i_prev << "\n\n" << *j_prev; } } } } } class CheckTraceOp { public: CheckTraceOp(const vector& nvec) : nvec_(nvec) { } void operator()(const DummyNode* n) const { for (vector::const_iterator i = nvec_.begin(); i != nvec_.end(); ++i) { if ((*i)->index() != n->index()) { check_traces((*i)->trace(), n->trace()); } } } private: const vector& nvec_; }; void gcomm::check_trace(const vector& nvec) { for_each(nvec.begin(), nvec.end(), CheckTraceOp(nvec)); } galera-4-26.4.25/gcomm/test/check_pc_nondet.cpp000644 000164 177776 00000022210 15107057155 022404 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2019-2020 Codership Oy */ #include "check_gcomm.hpp" #include "pc_message.hpp" #include "pc_proto.hpp" #include "evs_proto.hpp" #include "gu_datetime.hpp" #include 
"gcomm/transport.hpp" #include "gu_asio.hpp" #include using namespace gcomm; using namespace gu; using namespace gu::datetime; using namespace std; using namespace std::rel_ops; class PCUser2 : public Toplay { Transport* tp_; bool sending_; uint8_t my_type_; bool send_; Period send_period_; Date next_send_; PCUser2(const PCUser2&); void operator=(const PCUser2); public: PCUser2(Protonet& net, const string& uri, const bool send = true) : Toplay(net.conf()), tp_(Transport::create(net, uri)), sending_(false), my_type_(static_cast(1 + ::rand()%4)), send_(send), send_period_("PT0.05S"), next_send_(Date::max()) { } ~PCUser2() { delete tp_; } void start() { gcomm::connect(tp_, this); tp_->connect(); gcomm::disconnect(tp_, this); tp_->pstack().push_proto(this); } void stop() { sending_ = false; tp_->pstack().pop_proto(this); gcomm::connect(tp_, this); tp_->close(); gcomm::disconnect(tp_, this); } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view()) { const View& view(um.view()); log_info << view; if (view.type() == V_PRIM && send_ == true) { sending_ = true; next_send_ = Date::monotonic() + send_period_; } } else { // log_debug << "received message: " << um.get_to_seq(); ck_assert(rb.len() - rb.offset() == 16); if (um.source() == tp_->uuid()) { ck_assert(um.user_type() == my_type_); } } } Protostack& pstack() { return tp_->pstack(); } Date handle_timers() { Date now(Date::monotonic()); if (now >= next_send_) { byte_t buf[16]; memset(buf, 0xa, sizeof(buf)); Datagram dg(Buffer(buf, buf + sizeof(buf))); // dg.get_header().resize(128); // dg.set_header_offset(128); int ret = send_down(dg, ProtoDownMeta(my_type_, rand() % 10 == 0 ? 
O_SAFE : O_LOCAL_CAUSAL)); if (ret != 0) { // log_debug << "send down " << ret; } next_send_ = next_send_ + send_period_; } return next_send_; } std::string listen_addr() const { return tp_->listen_addr(); } }; START_TEST(test_pc_transport) { log_info << "START (test_pc_transport)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr net(Protonet::create(conf)); PCUser2 pu1(*net, "pc://?" "evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1"); gu_conf_self_tstamp_on(); pu1.start(); net->event_loop(5*Sec); PCUser2 pu2(*net, std::string("pc://") + pu1.listen_addr().erase(0, strlen("tcp://")) + "?evs.info_log_mask=0xff&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "pc.recovery=0&" "node.name=n2"); PCUser2 pu3(*net, std::string("pc://") + pu1.listen_addr().erase(0, strlen("tcp://")) + "?evs.info_log_mask=0xff&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "pc.recovery=0&" "node.name=n3"); pu2.start(); net->event_loop(5*Sec); pu3.start(); net->event_loop(5*Sec); pu3.stop(); net->event_loop(5*Sec); pu2.stop(); net->event_loop(5*Sec); pu1.stop(); log_info << "cleanup"; net->event_loop(0); log_info << "finished"; } END_TEST START_TEST(test_set_param) { log_info << "START (test_pc_transport)"; gu::Config conf; Protolay::sync_param_cb_t sync_param_cb; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr net(Protonet::create(conf)); PCUser2 pu1(*net, "pc://?" 
"evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1"); pu1.start(); // no such a parameter ck_assert(net->set_param("foo.bar", "1", sync_param_cb) == false); const evs::seqno_t send_window( gu::from_string(conf.get("evs.send_window"))); const evs::seqno_t user_send_window( gu::from_string(conf.get("evs.user_send_window"))); try { net->set_param("evs.send_window", gu::to_string(user_send_window - 1), sync_param_cb); ck_abort_msg("exception not thrown"); } catch (gu::Exception& e) { ck_assert_msg(e.get_errno() == ERANGE, "%d: %s",e.get_errno(),e.what()); } try { net->set_param("evs.user_send_window", gu::to_string(send_window + 1), sync_param_cb); ck_abort_msg("exception not thrown"); } catch (gu::Exception& e) { ck_assert_msg(e.get_errno() == ERANGE, "%d: %s",e.get_errno(),e.what()); } // Note: These checks may have to change if defaults are changed ck_assert(net->set_param( "evs.send_window", gu::to_string(send_window - 1), sync_param_cb) == true); ck_assert(gu::from_string(conf.get("evs.send_window")) == send_window - 1); ck_assert(net->set_param( "evs.user_send_window", gu::to_string(user_send_window + 1), sync_param_cb) == true); ck_assert(gu::from_string( conf.get("evs.user_send_window")) == user_send_window + 1); pu1.stop(); } END_TEST START_TEST(test_trac_599) { class D : public gcomm::Toplay { public: D(gu::Config& conf) : gcomm::Toplay(conf) { } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { } }; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); D d(conf); std::unique_ptr pnet(gcomm::Protonet::create(conf)); std::unique_ptr tp( gcomm::Transport::create (*pnet,"pc://?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0" "&pc.recovery=0")); gcomm::connect(tp.get(), &d); gu::Buffer buf(10); Datagram dg(buf); int err; err = tp->send_down(dg, gcomm::ProtoDownMeta()); ck_assert_msg(err == ENOTCONN, 
"%d", err); tp->connect(true); buf.resize(tp->mtu()); Datagram dg2(buf); err = tp->send_down(dg2, gcomm::ProtoDownMeta()); ck_assert_msg(err == 0, "%d", err); buf.resize(buf.size() + 1); Datagram dg3(buf); err = tp->send_down(dg3, gcomm::ProtoDownMeta()); ck_assert_msg(err == EMSGSIZE, "%d", err); pnet->event_loop(gu::datetime::Sec); tp->close(); } END_TEST // test for forced teardown START_TEST(test_trac_620) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr net(Protonet::create(conf)); Transport* tp(Transport::create(*net, "pc://?" "evs.info_log_mask=0xff&" "gmcast.listen_addr=tcp://127.0.0.1:0&" "gmcast.group=pc&" "gmcast.time_wait=PT0.5S&" "pc.recovery=0&" "node.name=n1")); class D : public gcomm::Toplay { public: D(gu::Config& conf) : gcomm::Toplay(conf) { } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { } }; D d(conf); gcomm::connect(tp, &d); tp->connect(true); tp->close(true); gcomm::disconnect(tp, &d); delete tp; } END_TEST Suite* pc_nondet_suite() { Suite* s = suite_create("gcomm::pc_nondet"); TCase* tc; tc = tcase_create("test_pc_transport"); tcase_add_test(tc, test_pc_transport); tcase_set_timeout(tc, 35); suite_add_tcase(s, tc); tc = tcase_create("test_set_param"); tcase_add_test(tc, test_set_param); suite_add_tcase(s, tc); tc = tcase_create("test_trac_599"); tcase_add_test(tc, test_trac_599); suite_add_tcase(s, tc); tc = tcase_create("test_trac_620"); tcase_add_test(tc, test_trac_620); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_templ.hpp000644 000164 177776 00000004654 15107057155 021575 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #ifndef GCOMM_CHECK_TEMPL_HPP #define GCOMM_CHECK_TEMPL_HPP #include "gcomm/types.hpp" #include "gcomm/transport.hpp" #include #include #include namespace gcomm { template void check_serialization(const T& c, const size_t expected_size, const T& default_c) { 
ck_assert_msg(c.serial_size() == expected_size, "size = %zu expected = %zu", c.serial_size(), expected_size); gu::byte_t* buf = new gu::byte_t[expected_size + 7]; size_t ret; // Check that what is written gets also read try { (void)c.serialize(buf, expected_size, 1); std::ostringstream os; os << c; ck_abort_msg("exception not thrown for %s", os.str().c_str()); } catch (gu::Exception& e) { // OK } ck_assert(c.serialize(buf, expected_size, 0) == expected_size); T c2(default_c); // Commented out. This test happened to work because default // protocol version for messages was zero and if the second // byte of the buffer contained something else, exception was // thrown. Now that the version can be different from zero, // the outcome of this check depends on message structure. // try // { // size_t res(c2.unserialize(buf, expected_size, 1)); // std::ostringstream os; // os << c; // ck_abort_msg("exception not thrown for %s, result %zu expected %zu", // os.str().c_str(), res, expected_size); // } // catch (gu::Exception& e) // { // // OK // } ret = c2.unserialize(buf, expected_size, 0); ck_assert_msg(ret == expected_size, "expected %zu ret %zu", expected_size, ret); if ((c == c2) == false) { log_warn << "\n\t" << c << " !=\n\t" << c2; } ck_assert(c == c2); // Check that read/write return offset properly ck_assert(c.serialize(buf, expected_size + 7, 5) == expected_size + 5); ck_assert(c2.unserialize(buf, expected_size + 7, 5) == expected_size + 5); ck_assert(c == c2); delete[] buf; } } // namespace gcomm #endif // CHECK_TEMPL_HPP galera-4-26.4.25/gcomm/test/check_util_nondet.cpp000644 000164 177776 00000003772 15107057155 022773 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "gcomm/util.hpp" #include "gcomm/protonet.hpp" #include "gcomm/datagram.hpp" #include "gcomm/conf.hpp" #include "check_gcomm.hpp" #include "gu_logger.hpp" #ifdef HAVE_ASIO_HPP #include "asio_protonet.hpp" #endif // HAVE_ASIO_HPP #include #include #include 
#include #include using std::vector; using std::numeric_limits; using std::string; using namespace gcomm; using gu::Exception; using gu::byte_t; using gu::Buffer; #if defined(HAVE_ASIO_HPP) START_TEST(test_asio) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); AsioProtonet pn(conf); string uri_str("tcp://127.0.0.1:0"); auto acc(pn.acceptor(uri_str)); acc->listen(uri_str); uri_str = acc->listen_addr(); SocketPtr cl = pn.socket(uri_str); cl->connect(uri_str); pn.event_loop(gu::datetime::Sec); SocketPtr sr = acc->accept(); ck_assert(sr->state() == Socket::S_CONNECTED); vector buf(cl->mtu()); for (size_t i = 0; i < buf.size(); ++i) { buf[i] = static_cast(i & 0xff); } for (size_t i = 0; i < 13; ++i) { Datagram dg(Buffer(&buf[0], &buf[0] + buf.size())); cl->send(0, dg); } pn.event_loop(gu::datetime::Sec); } END_TEST #endif // HAVE_ASIO_HPP START_TEST(test_protonet) { gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); mark_point(); Protonet* pn(Protonet::create(conf)); ck_assert(pn != NULL); pn->event_loop(1); mark_point(); delete pn; } END_TEST Suite* util_nondet_suite() { Suite* s = suite_create("util_nondet"); TCase* tc; #ifdef HAVE_ASIO_HPP tc = tcase_create("test_asio"); tcase_add_test(tc, test_asio); suite_add_tcase(s, tc); #endif // HAVE_ASIO_HPP tc = tcase_create("test_protonet"); tcase_add_test(tc, test_protonet); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_gcomm_nondet.cpp000644 000164 177776 00000004455 15107057155 023117 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2019 Codership Oy */ #include "check_gcomm.hpp" #include "gu_string_utils.hpp" // strsplit() #include "gu_exception.hpp" #include "gu_logger.hpp" #include "gu_crc32c.h" // gu_crc32c_configure() #include #include #include #include #include #include #include // * suits = 0; FILE* log_file(0); if (argc > 1 && !strcmp(argv[1],"nofork")) { srunner_set_fork_status(sr, CK_NOFORK); } else if (argc > 1 
&& strcmp(argv[1], "nolog") == 0) { /* no log redirection */} else { // running in the background, loggin' to file log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); // redirect occasional stderr there as well if (dup2(fileno(log_file), 2) < 0) { perror("dup2() failed: "); return EXIT_FAILURE; } } if (::getenv("CHECK_GCOMM_DEBUG")) { gu_log_max_level = GU_LOG_DEBUG; //gu::Logger::enable_debug(true); } gu_crc32c_configure(); log_info << "check_gcomm_nondet, start tests"; if (::getenv("CHECK_GCOMM_SUITES")) { suits = new vector(gu::strsplit(::getenv("CHECK_GCOMM_SUITES"), ',')); } for (size_t i = 0; suites[i].suite != 0; ++i) { if (suits == 0 || find(suits->begin(), suits->end(), suites[i].name) != suits->end()) { srunner_add_suite(sr, suites[i].suite()); } } delete suits; suits = 0; srunner_run_all(sr, CK_NORMAL); log_info << "check_gcomm, run all tests"; int n_fail = srunner_ntests_failed(sr); srunner_free(sr); if (0 == n_fail && 0 != log_file) ::unlink(LOG_FILE); return n_fail == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-4-26.4.25/gcomm/test/check_gmcast.cpp000644 000164 177776 00000032203 15107057155 021714 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2025 Codership Oy */ #include "check_gcomm.hpp" #include "gmcast.hpp" #include namespace { struct FakeSocket : public gcomm::Socket { void connect(const gu::URI& uri) override {} void close() override {} void set_option(const std::string& key, const std::string& val) override { } int send(int segment, const gcomm::Datagram& dg) override { return 0; } void async_receive() override {} size_t mtu() const override { return 1024; } std::string remote_addr() const override { return "127.0.0.1:2"; } std::string local_addr() const override { return "127.0.0.1:1"; } gcomm::Socket::State state() const override { return gcomm::Socket::S_CONNECTED; } gcomm::SocketId id() const override { return reinterpret_cast(this); } gcomm::SocketStats stats() const override { return gcomm::SocketStats(); } FakeSocket() : Socket(gu::URI("tcp://127.0.0.1:1")) { } ~FakeSocket() override = default; }; struct RelaySetFixture : public gcomm::gmcast::ProtoContext { ~RelaySetFixture() { for (auto proto : proto_set) { delete proto; } } std::set proto_set{}; std::set nonlive_uuids{}; gcomm::GMCast::RelaySet relay_set{}; /* Convenience UUIDs */ const gcomm::UUID uuid1{ 1 }; const gcomm::UUID uuid2{ 2 }; const gcomm::UUID uuid3{ 3 }; const gcomm::UUID uuid4{ 4 }; const gcomm::UUID uuid5{ 5 }; /* Begin of ProtoContext implementation */ /* Uuid1 is the local node */ const gcomm::UUID& node_uuid() const override { return uuid1; } bool is_own(const gcomm::gmcast::Proto* proto) const override { return proto->remote_uuid() == node_uuid(); } void blacklist(const gcomm::gmcast::Proto* proto) override { gu_throw_fatal << "Not implemented"; } bool is_not_own_and_duplicate_exists( const gcomm::gmcast::Proto*) const override { return false; } bool is_proto_evicted(const gcomm::gmcast::Proto* proto) const override { gu_throw_fatal << 
"Not implemented"; return false; } bool prim_view_reached() const override { gu_throw_fatal << "Not implemented"; return false; } void remove_viewstate_file() const override { gu_throw_fatal << "Not implemented"; } std::string self_string() const override { return "node1"; } /* End of ProtoContext implementation */ void add_proto(int idx, uint8_t segment) { const gcomm::UUID uuid{idx}; std::string remote_addr{"127.0.0.1:" + std::to_string(idx)}; auto proto = new gcomm::gmcast::Proto{ *this /* context */, 0 /* version */, std::make_shared< FakeSocket>() /* socket */, "127.0.0.1:1" /* local_addr */, remote_addr /* remote_addr */, "" /* mcast_addr */, segment /* local_segment */, "test" /* group_name */ }; proto->wait_handshake(); gcomm::gmcast::Message handshake_msg{ 0 /* version */, gcomm::gmcast::Message::Type::GMCAST_T_HANDSHAKE, uuid, uuid, segment /* segment_id */ }; proto->handle_handshake(handshake_msg); ck_assert(proto->state() == gcomm::gmcast::Proto::S_HANDSHAKE_RESPONSE_SENT); gcomm::gmcast::Message ok_msg{ 0 /* version */, gcomm::gmcast::Message::Type::GMCAST_T_OK, uuid, segment, "" }; proto->handle_ok(ok_msg); ck_assert(proto->state() == gcomm::gmcast::Proto::S_OK); ck_assert(proto->remote_uuid() == uuid); proto_set.insert(proto); } /* Add proto with default segment 0 */ void add_proto(int idx) { add_proto(idx, 0); } /* Add link from src to dst. The link is added to the proto with uuid * src. 
*/ void add_link(int src, int dst) { auto src_proto = std::find_if(proto_set.begin(), proto_set.end(), [src](const gcomm::gmcast::Proto* p) { return p->remote_uuid() == gcomm::UUID(src); }); auto dst_proto = std::find_if(proto_set.begin(), proto_set.end(), [dst](const gcomm::gmcast::Proto* p) { return p->remote_uuid() == gcomm::UUID(dst); }); ck_assert(src_proto != proto_set.end()); ck_assert(dst_proto != proto_set.end()); gcomm::gmcast::Message::NodeList nl; nl.insert(std::make_pair(gcomm::UUID(dst), gcomm::gmcast::Node("127.0.0.1:" + std::to_string(dst)))); gcomm::gmcast::Message msg{ 0 /* version */, gcomm::gmcast::Message::Type::GMCAST_T_TOPOLOGY_CHANGE, (*src_proto)->remote_uuid(), "test" /* group_name */, nl }; (*src_proto)->handle_topology_change(msg); } }; } /* namespace */ START_TEST(test_gmcast_empty_relay_set) { log_info << "START test_gmcast_empty_relay_set"; RelaySetFixture f; gcomm::GMCast::RelaySet relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 0); ck_assert(relay_set.empty()); } END_TEST START_TEST(test_gmcast_relay_set_same_segment) { log_info << "START test_gmcast_relay_set_same_segment"; RelaySetFixture f; f.add_proto(2); f.add_proto(3); f.add_proto(4); f.add_proto(5); /* Add link from 3 to 2 so that 2 is reachable via 3 */ f.add_link(3, 2); /* No direct link from 1 to 2 */ f.nonlive_uuids.insert(f.uuid2); auto relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 0); ck_assert(relay_set.size() == 1); ck_assert(relay_set.begin()->proto->remote_uuid() == f.uuid3); ck_assert(f.nonlive_uuids.empty()); } END_TEST START_TEST(test_gmcast_relay_set_same_segment_multiple_paths) { log_info << "START test_gmcast_relay_set_same_segment_multiple_paths"; RelaySetFixture f; f.add_proto(2); f.add_proto(3); f.add_proto(4); f.add_proto(5); /* Add links from 2, 3, 4 to 5 */ f.add_link(2, 5); f.add_link(3, 5); f.add_link(4, 5); f.nonlive_uuids.insert(f.uuid5); auto relay_set = 
gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 0); ck_assert(relay_set.size() == 1); ck_assert(relay_set.begin()->proto->remote_uuid() == f.uuid2 || relay_set.begin()->proto->remote_uuid() == f.uuid3 || relay_set.begin()->proto->remote_uuid() == f.uuid4); ck_assert(f.nonlive_uuids.empty()); } END_TEST START_TEST(test_gmcast_relay_set_multiple_segments) { log_info << "START test_gmcast_relay_set_multiple_segments"; RelaySetFixture f; f.add_proto(2, 0); f.add_proto(3, 0); f.add_proto(4, 1); f.add_proto(5, 1); /* Add links from 2, 3, 4 to 5 */ f.add_link(2, 5); f.add_link(3, 5); f.add_link(4, 5); f.nonlive_uuids.insert(f.uuid5); /* The preferred path is via 4 to 5 as they are in the preferred segment 1 */ auto relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 1); ck_assert(relay_set.size() == 1); ck_assert(relay_set.begin()->proto->remote_uuid() == f.uuid4); ck_assert(f.nonlive_uuids.empty()); } END_TEST START_TEST(test_gmcast_relay_set_multiple_segments_two) { log_info << "START test_gmcast_relay_set_multiple_segments_two"; RelaySetFixture f; f.add_proto(2, 0); f.add_proto(3, 0); f.add_proto(4, 1); f.add_proto(5, 1); /* Add links from 2, 3, 4 to 5 */ f.add_link(2, 5); f.add_link(3, 5); f.add_link(4, 5); /* Make 4 and 5 unreachable from 1. */ f.nonlive_uuids.insert(f.uuid4); f.nonlive_uuids.insert(f.uuid5); /* The preferred path is via 2 or 3 to 5 as they are in the preferred * segment 1. Node 4 is unreachable from 1. 
*/ auto relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 1); ck_assert(relay_set.size() == 1); ck_assert(relay_set.begin()->proto->remote_uuid() == f.uuid2 || relay_set.begin()->proto->remote_uuid() == f.uuid3); ck_assert(f.nonlive_uuids.size() == 1); ck_assert(f.nonlive_uuids.count(f.uuid4) == 1); } END_TEST START_TEST(test_gmcast_relay_set_tree) { log_info << "START test_gmcast_relay_set_tree"; RelaySetFixture f; f.add_proto(2); f.add_proto(3); f.add_proto(4); f.add_proto(5); /* Add links from 2 to 4, and from 3 to 5 */ f.add_link(2, 4); f.add_link(3, 5); /* Make 4 and 5 unreachable from 1. */ f.nonlive_uuids.insert(f.uuid4); f.nonlive_uuids.insert(f.uuid5); /* Expect a relay_set of size 2. Node 4 is reachable through node 2, and node 5 through node 3. */ auto relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 0); ck_assert(relay_set.size() == 2); auto node_2 = std::find_if(relay_set.begin(), relay_set.end(), [&f](const gcomm::GMCast::RelayEntry& entry) { return entry.proto->remote_uuid() == f.uuid2; }); ck_assert(node_2 != relay_set.end()); auto node_3 = std::find_if(relay_set.begin(), relay_set.end(), [&f](const gcomm::GMCast::RelayEntry& entry) { return entry.proto->remote_uuid() == f.uuid3; }); ck_assert(node_3 != relay_set.end()); ck_assert(f.nonlive_uuids.size() == 0); } END_TEST START_TEST(test_gmcast_relay_set_tree_with_segments) { log_info << "START test_gmcast_relay_set_tree_with_segments"; RelaySetFixture f; f.add_proto(2, 0); f.add_proto(3, 1); f.add_proto(4, 0); f.add_proto(5, 1); /* Add links from 2 to 4, and from 3 to 5 */ f.add_link(2, 4); f.add_link(3, 5); /* Make 4 and 5 unreachable from 1. */ f.nonlive_uuids.insert(f.uuid4); f.nonlive_uuids.insert(f.uuid5); /* Expect a relay_set of size 2. Node 4 is reachable through node 2, and node 5 through node 3. 
*/ auto relay_set = gcomm::GMCast::compute_relay_set(f.proto_set, f.nonlive_uuids, 1); ck_assert(relay_set.size() == 2); auto node_2 = std::find_if(relay_set.begin(), relay_set.end(), [&f](const gcomm::GMCast::RelayEntry& entry) { return entry.proto->remote_uuid() == f.uuid2; }); ck_assert(node_2 != relay_set.end()); auto node_3 = std::find_if(relay_set.begin(), relay_set.end(), [&f](const gcomm::GMCast::RelayEntry& entry) { return entry.proto->remote_uuid() == f.uuid3; }); ck_assert(node_3 != relay_set.end()); ck_assert(f.nonlive_uuids.size() == 0); } END_TEST Suite* gmcast_suite() { Suite* s = suite_create("gmcast"); TCase* tc; tc = tcase_create("test_gmcast_empty_relay_set"); tcase_add_test(tc, test_gmcast_empty_relay_set); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_same_segment"); tcase_add_test(tc, test_gmcast_relay_set_same_segment); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_same_segment_multiple_paths"); tcase_add_test(tc, test_gmcast_relay_set_same_segment_multiple_paths); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_multiple_segments"); tcase_add_test(tc, test_gmcast_relay_set_multiple_segments); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_multiple_segments_two"); tcase_add_test(tc, test_gmcast_relay_set_multiple_segments_two); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_tree"); tcase_add_test(tc, test_gmcast_relay_set_tree); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_relay_set_tree_with_segments"); tcase_add_test(tc, test_gmcast_relay_set_tree_with_segments); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_evs2.cpp000644 000164 177776 00000243071 15107057155 021324 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy */ /*! 
* @file Unit tests for refactored EVS */ #include "evs_proto.hpp" #include "evs_input_map2.hpp" #include "evs_message2.hpp" #include "evs_seqno.hpp" #include "check_gcomm.hpp" #include "check_templ.hpp" #include "check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() #include #include #include #include "check.h" // // set GALERA_TEST_DETERMINISTIC env // variable before running pc test suite. // static class deterministic_tests { public: deterministic_tests() : deterministic_tests_() { if (::getenv("GALERA_TEST_DETERMINISTIC")) { deterministic_tests_ = true; } else { deterministic_tests_ = false; } } bool operator()() const { return deterministic_tests_; } private: bool deterministic_tests_; } deterministic_tests; using namespace std; using namespace std::rel_ops; using namespace gu::datetime; using namespace gcomm; using namespace gcomm::evs; using gu::DeleteObject; void init_rand() { unsigned int seed(static_cast(time(0))); log_info << "rand seed " << seed; srand(seed); } void init_rand(unsigned int seed) { log_info << "rand seed " << seed; srand(seed); } START_TEST(test_range) { log_info << "START"; Range r(3, 6); check_serialization(r, 2 * sizeof(seqno_t), Range()); } END_TEST START_TEST(test_message) { log_info << "START"; UUID uuid1(0, 0); ViewId view_id(V_TRANS, uuid1, 4567); seqno_t seq(478), aru_seq(456), seq_range(7); UserMessage um(0, uuid1, view_id, seq, aru_seq, seq_range, O_SAFE, 75433, 0xab, Message::F_SOURCE); ck_assert(um.serial_size() % 4 == 0); check_serialization(um, um.serial_size(), UserMessage()); AggregateMessage am(0xab, 17457, 0x79); check_serialization(am, 4, AggregateMessage()); DelegateMessage dm(0, uuid1, view_id); dm.set_source(uuid1); check_serialization(dm, dm.serial_size(), DelegateMessage()); MessageNodeList node_list; node_list.insert(make_pair(uuid1, MessageNode())); node_list.insert(make_pair(UUID(2), MessageNode(true, false, 254, true, 1, ViewId(V_REG), 5, Range(7, 8)))); JoinMessage 
jm(0, uuid1, view_id, 8, 5, 27, node_list); jm.set_source(uuid1); check_serialization(jm, jm.serial_size(), JoinMessage()); InstallMessage im(0, uuid1, view_id, ViewId(V_REG, view_id.uuid(), view_id.seq()), 8, 5, 27, node_list); im.set_source(uuid1); check_serialization(im, im.serial_size(), InstallMessage()); LeaveMessage lm(0, uuid1, view_id, 45, 88, 3456); lm.set_source(uuid1); check_serialization(lm, lm.serial_size(), LeaveMessage()); DelayedListMessage dlm(0, uuid1, view_id, 4576); dlm.add(UUID(2), 23); dlm.add(UUID(3), 45); dlm.add(UUID(5), 255); check_serialization(dlm, dlm.serial_size(), DelayedListMessage()); } END_TEST START_TEST(test_input_map_insert) { log_info << "START"; UUID uuid1(1), uuid2(2); InputMap im; ViewId view(V_REG, uuid1, 0); try { im.insert(0, UserMessage(0, uuid1, view, 0)); ck_abort_msg("Exception not thrown, input map has not been " "reset/initialized yet"); } catch (...) { } im.reset(1); im.insert(0, UserMessage(0, uuid1, view, 0)); im.clear(); im.reset(2); for (seqno_t s = 0; s < 10; ++s) { im.insert(0, UserMessage(0, uuid1, view, s)); im.insert(1, UserMessage(0, uuid2, view, s)); } for (seqno_t s = 0; s < 10; ++s) { InputMap::iterator i = im.find(0, s); ck_assert(i != im.end()); ck_assert(InputMapMsgIndex::value(i).msg().source() == uuid1); ck_assert(InputMapMsgIndex::value(i).msg().seq() == s); i = im.find(1, s); ck_assert(i != im.end()); ck_assert(InputMapMsgIndex::value(i).msg().source() == uuid2); ck_assert(InputMapMsgIndex::value(i).msg().seq() == s); } } END_TEST START_TEST(test_input_map_find) { log_info << "START"; InputMap im; UUID uuid1(1); ViewId view(V_REG, uuid1, 0); im.reset(1); im.insert(0, UserMessage(0, uuid1, view, 0)); ck_assert(im.find(0, 0) != im.end()); im.insert(0, UserMessage(0, uuid1, view, 2)); im.insert(0, UserMessage(0, uuid1, view, 4)); im.insert(0, UserMessage(0, uuid1, view, 7)); ck_assert(im.find(0, 2) != im.end()); ck_assert(im.find(0, 4) != im.end()); ck_assert(im.find(0, 7) != im.end()); 
ck_assert(im.find(0, 3) == im.end()); ck_assert(im.find(0, 5) == im.end()); ck_assert(im.find(0, 6) == im.end()); ck_assert(im.find(0, 8) == im.end()); } END_TEST START_TEST(test_input_map_safety) { log_info << "START"; InputMap im; UUID uuid1(1); size_t index1(0); ViewId view(V_REG, uuid1, 0); im.reset(1); im.insert(index1, UserMessage(0, uuid1, view, 0)); ck_assert(im.aru_seq() == 0); im.insert(index1, UserMessage(0, uuid1, view, 1)); ck_assert(im.aru_seq() == 1); im.insert(index1, UserMessage(0, uuid1, view, 2)); ck_assert(im.aru_seq() == 2); im.insert(index1, UserMessage(0, uuid1, view, 3)); ck_assert(im.aru_seq() == 3); im.insert(index1, UserMessage(0, uuid1, view, 5)); ck_assert(im.aru_seq() == 3); im.insert(index1, UserMessage(0, uuid1, view, 4)); ck_assert(im.aru_seq() == 5); InputMap::iterator i = im.find(index1, 0); ck_assert(im.is_fifo(i) == true); ck_assert(im.is_agreed(i) == true); ck_assert(im.is_safe(i) == false); im.set_safe_seq(index1, 0); ck_assert(im.is_safe(i) == true); im.set_safe_seq(index1, 5); i = im.find(index1, 5); ck_assert(im.is_safe(i) == true); im.insert(index1, UserMessage(0, uuid1, view, 7)); im.set_safe_seq(index1, im.aru_seq()); i = im.find(index1, 7); ck_assert(im.is_safe(i) == false); } END_TEST START_TEST(test_input_map_erase) { log_info << "START"; InputMap im; size_t index1(0); UUID uuid1(1); ViewId view(V_REG, uuid1, 1); im.reset(1); for (seqno_t s = 0; s < 10; ++s) { im.insert(index1, UserMessage(0, uuid1, view, s)); } for (seqno_t s = 0; s < 10; ++s) { InputMap::iterator i = im.find(index1, s); ck_assert(i != im.end()); im.erase(i); i = im.find(index1, s); ck_assert(i == im.end()); (void)im.recover(index1, s); } im.set_safe_seq(index1, 9); try { im.recover(index1, 9); ck_abort_msg("Exception not thrown, " "setting safe seq should purge index"); } catch (...) 
{ } } END_TEST START_TEST(test_input_map_overwrap) { log_info << "START"; InputMap im; const size_t n_nodes(5); ViewId view(V_REG, UUID(1), 1); vector uuids; for (size_t n = 0; n < n_nodes; ++n) { uuids.push_back(UUID(static_cast(n + 1))); } im.reset(n_nodes); Date start(Date::monotonic()); size_t cnt(0); seqno_t last_safe(-1); for (seqno_t seq = 0; seq < 100000; ++seq) { for (size_t i = 0; i < n_nodes; ++i) { UserMessage um(0, uuids[i], view, seq); (void)im.insert(i, um); if ((seq + 5) % 10 == 0) { last_safe = um.seq() - 3; im.set_safe_seq(i, last_safe); for (InputMap::iterator ii = im.begin(); ii != im.end() && im.is_safe(ii) == true; ii = im.begin()) { im.erase(ii); } } cnt++; } gcomm_assert(im.aru_seq() == seq); gcomm_assert(im.safe_seq() == last_safe); } Date stop(Date::monotonic()); double div(double(stop.get_utc() - start.get_utc())/gu::datetime::Sec); log_info << "input map msg rate " << double(cnt)/div; } END_TEST class InputMapInserter { public: InputMapInserter(InputMap& im) : im_(im) { } void operator()(const pair& p) const { im_.insert(p.first, p.second); } private: InputMap& im_; }; START_TEST(test_input_map_random_insert) { log_info << "START"; init_rand(); seqno_t n_seqnos(1024); size_t n_uuids(4); vector uuids(n_uuids); vector > msgs(static_cast(n_uuids*n_seqnos)); ViewId view_id(V_REG, UUID(1), 1); InputMap im; for (size_t i = 0; i < n_uuids; ++i) { uuids[i] = (static_cast(i + 1)); } im.reset(n_uuids); for (seqno_t j = 0; j < n_seqnos; ++j) { for (size_t i = 0; i < n_uuids; ++i) { msgs[static_cast(j*n_uuids) + i] = make_pair(i, UserMessage(0, uuids[i], view_id, j)); } } vector > random_msgs(msgs); random_shuffle(random_msgs.begin(), random_msgs.end()); for_each(random_msgs.begin(), random_msgs.end(), InputMapInserter(im)); size_t n = 0; for (InputMap::iterator i = im.begin(); i != im.end(); ++i) { const InputMapMsg& msg(InputMapMsgIndex::value(i)); ck_assert(msg.msg() == msgs[n].second); ck_assert(im.is_safe(i) == false); ++n; } 
ck_assert(im.aru_seq() == n_seqnos - 1); ck_assert(im.safe_seq() == -1); for (size_t i = 0; i < n_uuids; ++i) { ck_assert(im.range(i) == Range(n_seqnos, n_seqnos - 1)); im.set_safe_seq(i, n_seqnos - 1); } ck_assert(im.safe_seq() == n_seqnos - 1); } END_TEST START_TEST(test_input_map_gap_range_list) { gcomm::evs::InputMap im; im.reset(1); gcomm::UUID uuid(1); gcomm::ViewId view_id(gcomm::V_REG, uuid, 1); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 0, 0)); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 2, 0)); std::vector gap_range( im.gap_range_list(0, gcomm::evs::Range(0, 2))); ck_assert(gap_range.size() == 1); ck_assert(gap_range.begin()->lu() == 1); ck_assert(gap_range.begin()->hs() == 1); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 4, 0)); gap_range = im.gap_range_list(0, gcomm::evs::Range(0, 4)); ck_assert(gap_range.size() == 2); ck_assert(gap_range.begin()->lu() == 1); ck_assert(gap_range.begin()->hs() == 1); ck_assert(gap_range.rbegin()->lu() == 3); ck_assert(gap_range.rbegin()->hs() == 3); // Although there are two messages missing, limiting the range to 0,2 // should return only the first one. 
gap_range = im.gap_range_list(0, gcomm::evs::Range(0, 2)); ck_assert(gap_range.size() == 1); ck_assert(gap_range.begin()->lu() == 1); ck_assert(gap_range.begin()->hs() == 1); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 8, 0)); gap_range = im.gap_range_list(0, gcomm::evs::Range(0, 8)); ck_assert(gap_range.size() == 3); ck_assert(gap_range.begin()->lu() == 1); ck_assert(gap_range.begin()->hs() == 1); ck_assert(gap_range.rbegin()->lu() == 5); ck_assert(gap_range.rbegin()->hs() == 7); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 3, 0)); gap_range = im.gap_range_list(0, gcomm::evs::Range(0, 8)); ck_assert(gap_range.size() == 2); ck_assert(gap_range.begin()->lu() == 1); ck_assert(gap_range.begin()->hs() == 1); ck_assert(gap_range.rbegin()->lu() == 5); ck_assert(gap_range.rbegin()->hs() == 7); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 1, 0)); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 5, 0)); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 6, 0)); im.insert(0, gcomm::evs::UserMessage(0, uuid, view_id, 7, 0)); gap_range = im.gap_range_list(0, gcomm::evs::Range(0, 8)); ck_assert(gap_range.empty()); } END_TEST static Datagram* get_msg(DummyTransport* tp, std::unique_ptr& msg, bool release = true) { Datagram* rb = tp->out(); if (rb != 0) { msg = Proto::unserialize_message(tp->uuid(), *rb).first; if (release == true) { delete rb; } } return rb; } static void single_join(DummyTransport* t, Proto* p) { std::unique_ptr jm, im, gm; // Initial state is joining p->shift_to(Proto::S_JOINING); // Send join must produce emitted join message p->send_join(); Datagram* rb = get_msg(t, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); // Install message is emitted at the end of JOIN handling // 'cause this is the only instance and is always consistent // with itself rb = get_msg(t, im); ck_assert(rb != 0); ck_assert(im->type() == Message::EVS_T_INSTALL); // Handling INSTALL message emits three gap messages, // one 
for receiving install message (commit gap), one for // shift to install and one for shift to operational rb = get_msg(t, gm); ck_assert(rb != 0); ck_assert(gm->type() == Message::EVS_T_GAP); ck_assert((gm->flags() & Message::F_COMMIT) != 0); rb = get_msg(t, gm); ck_assert(rb != 0); ck_assert(gm->type() == Message::EVS_T_GAP); ck_assert((gm->flags() & Message::F_COMMIT) == 0); rb = get_msg(t, gm); ck_assert(rb != 0); ck_assert(gm->type() == Message::EVS_T_GAP); ck_assert((gm->flags() & Message::F_COMMIT) == 0); // State must have evolved JOIN -> S_GATHER -> S_INSTALL -> S_OPERATIONAL ck_assert(p->state() == Proto::S_OPERATIONAL); // Handle join message again, must stay in S_OPERATIONAL, must not // emit anything p->handle_msg(*jm); rb = get_msg(t, gm); ck_assert(rb == 0); ck_assert(p->state() == Proto::S_OPERATIONAL); } class DummyUser : public Toplay { public: DummyUser(gu::Config& conf) : Toplay(conf) { } void handle_up(const void*, const Datagram&, const ProtoUpMeta&) { } private: }; START_TEST(test_proto_single_join) { log_info << "START"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid(1); DummyTransport t(uuid); mark_point(); DummyUser u(conf); mark_point(); Proto p(conf, uuid, 0); mark_point(); gcomm::connect(&t, &p); gcomm::connect(&p, &u); single_join(&t, &p); } END_TEST static void double_join(DummyTransport* t1, Proto* p1, DummyTransport* t2, Proto* p2) { std::unique_ptr jm; std::unique_ptr im; std::unique_ptr gm; std::unique_ptr gm2; std::unique_ptr msg; Datagram* rb; // Initial states check p2->shift_to(Proto::S_JOINING); ck_assert(p1->state() == Proto::S_OPERATIONAL); ck_assert(p2->state() == Proto::S_JOINING); // Send join message, don't self handle immediately // Expected output: one join message p2->send_join(false); ck_assert(p2->state() == Proto::S_JOINING); rb = get_msg(t2, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(t2, msg); ck_assert(rb == 0); 
// Handle node 2's join on node 1 // Expected output: shift to S_GATHER and one join message p1->handle_msg(*jm); ck_assert(p1->state() == Proto::S_GATHER); rb = get_msg(t1, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(t1, msg); ck_assert(rb == 0); // Handle node 1's join on node 2 // Expected output: shift to S_GATHER and one join message p2->handle_msg(*jm); ck_assert(p2->state() == Proto::S_GATHER); rb = get_msg(t2, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(t2, msg); ck_assert(rb == 0); // Handle node 2's join on node 1 // Expected output: Install and commit gap messages, state stays in S_GATHER p1->handle_msg(*jm); ck_assert(p1->state() == Proto::S_GATHER); rb = get_msg(t1, im); ck_assert(rb != 0); ck_assert(im->type() == Message::EVS_T_INSTALL); rb = get_msg(t1, gm); ck_assert(rb != 0); ck_assert(gm->type() == Message::EVS_T_GAP); ck_assert((gm->flags() & Message::F_COMMIT) != 0); rb = get_msg(t1, msg); ck_assert(rb == 0); // Handle install message on node 2 // Expected output: commit gap message and state stays in S_RECOVERY p2->handle_msg(*im); ck_assert(p2->state() == Proto::S_GATHER); rb = get_msg(t2, gm2); ck_assert(rb != 0); ck_assert(gm2->type() == Message::EVS_T_GAP); ck_assert((gm2->flags() & Message::F_COMMIT) != 0); rb = get_msg(t2, msg); ck_assert(rb == 0); // Handle gap messages // Expected output: Both nodes shift to S_INSTALL, // both send gap messages p1->handle_msg(*gm2); ck_assert(p1->state() == Proto::S_INSTALL); std::unique_ptr gm12; rb = get_msg(t1, gm12); ck_assert(rb != 0); ck_assert(gm12->type() == Message::EVS_T_GAP); ck_assert((gm12->flags() & Message::F_COMMIT) == 0); rb = get_msg(t1, msg); ck_assert(rb == 0); p2->handle_msg(*gm); ck_assert(p2->state() == Proto::S_INSTALL); std::unique_ptr gm22; rb = get_msg(t2, gm22); ck_assert(rb != 0); ck_assert(gm22->type() == Message::EVS_T_GAP); ck_assert((gm22->flags() & Message::F_COMMIT) == 0); rb = get_msg(t2, 
msg); ck_assert(rb == 0); // Handle final gap messages, expected output shift to operational // and gap message p1->handle_msg(*gm22); ck_assert(p1->state() == Proto::S_OPERATIONAL); rb = get_msg(t1, msg); ck_assert(rb != 0); ck_assert(msg->type() == Message::EVS_T_GAP); ck_assert((msg->flags() & Message::F_COMMIT) == 0); rb = get_msg(t1, msg); ck_assert(rb == 0); p2->handle_msg(*gm12); ck_assert(p2->state() == Proto::S_OPERATIONAL); rb = get_msg(t2, msg); ck_assert(rb != 0); ck_assert(msg->type() == Message::EVS_T_GAP); ck_assert((msg->flags() & Message::F_COMMIT) == 0); rb = get_msg(t2, msg); ck_assert(rb == 0); } START_TEST(test_proto_double_join) { log_info << "START"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1), uuid2(2); DummyTransport t1(uuid1), t2(uuid2); mark_point(); DummyUser u1(conf), u2(conf); mark_point(); Proto p1(conf, uuid1, 0), p2(conf, uuid2, 0); gcomm::connect(&t1, &p1); gcomm::connect(&p1, &u1); gcomm::connect(&t2, &p2); gcomm::connect(&p2, &u2); single_join(&t1, &p1); double_join(&t1, &p1, &t2, &p2); } END_TEST static gu::Config gu_conf; static DummyNode* create_dummy_node_with_uuid( size_t idx, const gcomm::UUID& uuid, int version, const string& suspect_timeout, const string& inactive_timeout, const string& retrans_period) { // reset conf to avoid stale config in case of nofork gu_conf = gu::Config(); gu::ssl_register_params(gu_conf); gcomm::Conf::register_params(gu_conf); string conf = "evs://?" 
+ Conf::EvsViewForgetTimeout + "=PT1H&" + Conf::EvsInactiveCheckPeriod + "=" + to_string(Period(suspect_timeout)/3) + "&" + Conf::EvsSuspectTimeout + "=" + suspect_timeout + "&" + Conf::EvsInactiveTimeout + "=" + inactive_timeout + "&" + Conf::EvsKeepalivePeriod + "=" + retrans_period + "&" + Conf::EvsJoinRetransPeriod + "=" + retrans_period + "&" + Conf::EvsInfoLogMask + "=0x7" + "&" + Conf::EvsDebugLogMask + "=0xfff" + "&" + Conf::EvsVersion + "=" + gu::to_string(version); if (::getenv("EVS_DEBUG_MASK") != 0) { conf += "&" + Conf::EvsDebugLogMask + "=" + ::getenv("EVS_DEBUG_MASK"); } list protos; protos.push_back(new DummyTransport(uuid, false)); protos.push_back(new Proto(gu_conf, uuid, 0, conf)); return new DummyNode(gu_conf, idx, uuid, protos); } static DummyNode* create_dummy_node( size_t idx, int version, const string& suspect_timeout = "PT1H", const string& inactive_timeout = "PT1H", const string& retrans_period = "PT10M") { UUID uuid(static_cast(idx)); return create_dummy_node_with_uuid(idx, uuid, version, suspect_timeout, inactive_timeout, retrans_period); } namespace { gcomm::evs::Proto* evs_from_dummy(DummyNode* dn) { return static_cast(dn->protos().back()); } DummyTransport* transport_from_dummy(DummyNode* dn) { return static_cast(dn->protos().front()); } } static void join_node(PropagationMatrix* p, DummyNode* n, bool first = false) { gu_trace(p->insert_tp(n)); gu_trace(n->connect(first)); } static void send_n(DummyNode* node, const size_t n) { for (size_t i = 0; i < n; ++i) { gu_trace(node->send()); } } static void set_cvi(vector& nvec, size_t i_begin, size_t i_end, size_t seq) { for (size_t i = i_begin; i <= i_end; ++i) { nvec[i]->set_cvi(ViewId(V_REG, nvec[i_begin]->uuid(), static_cast(seq))); } } template class ViewSeq { public: ViewSeq() { } bool operator()(const C& a, const C& b) const { return (a->trace().current_view_trace().view().id().seq() < b->trace().current_view_trace().view().id().seq()); } }; static uint32_t get_max_view_seq(const 
std::vector& dnv, size_t i, size_t j) { if (i == dnv.size()) return static_cast(-1); return (*std::max_element(dnv.begin() + i, dnv.begin() + j, ViewSeq()))->trace().current_view_trace().view().id().seq(); } START_TEST(test_proto_join_n) { log_info << "START (join_n)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back(create_dummy_node(i, 0))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(false)); max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (join_n_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; // @todo This test should terminate without these timeouts const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j <= i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); for (size_t j = 0; j <= i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_lossy) { gu_conf_self_tstamp_on(); log_info << "START (join_n_lossy)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, max_view_seq + 1); for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_join_n_lossy_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (join_n_lossy_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } uint32_t max_view_seq(0); for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, max_view_seq + 1); for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < i; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } max_view_seq = get_max_view_seq(dn, 0, i); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n) { gu_conf_self_tstamp_on(); log_info << "START (leave_n)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back(create_dummy_node(i, 0))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(true)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (leave_n_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT1H"); const string inactive_timeout("PT1H"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = i; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_lossy) { if (deterministic_tests()) return; gu_conf_self_tstamp_on(); log_info << "START (leave_n_lossy)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT15S"); const string inactive_timeout("PT30S"); const string retrans_period("PT1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } for (size_t i = 0; i < n_nodes; ++i) { dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); dn[i]->close(); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_leave_n_lossy_w_user_msg) { if (deterministic_tests()) return; gu_conf_self_tstamp_on(); log_info << "START (leave_n_lossy_w_user_msg)"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT15S"); const string inactive_timeout("PT30S"); const string retrans_period("PT1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = i; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, n_nodes - 1, max_view_seq + 1); dn[i]->close(); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, i + 1, n_nodes); } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST // Generic test code for split/merge cases static void test_proto_split_merge_gen(const size_t n_nodes, const bool send_msgs, const double loss) { PropagationMatrix prop; vector dn; const string suspect_timeout("PT15S"); const string inactive_timeout("PT30S"); const string retrans_period("PT1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, loss); prop.set_loss(j, i + 1, loss); } } vector split; for (size_t i = 0; i < n_nodes; ++i) { split.push_back(static_cast(i + 1)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t i = 1; i < n_nodes; ++i) { if (send_msgs == true) { for (size_t k = 0; k < 5; ++k) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 1 + j)); } gu_trace(prop.propagate_n(7)); } } log_info << "split " << i; for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { gu_trace(prop.set_loss(split[j], split[k], 0.)); gu_trace(prop.set_loss(split[k], split[j], 0.)); } } set_cvi(dn, 0, i - 1, max_view_seq + 1); set_cvi(dn, i, n_nodes - 1, max_view_seq + 1); if (send_msgs == true) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + rand() % 4)); } } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); log_info << "merge " << i; for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { gu_trace(prop.set_loss(split[j], split[k], loss)); gu_trace(prop.set_loss(split[k], split[j], loss)); } } set_cvi(dn, 0, n_nodes - 1, max_view_seq + 1); if (send_msgs == true) { for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + rand() % 4)); } } gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); } gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } START_TEST(test_proto_split_merge) { gu_conf_self_tstamp_on(); log_info << "START (split_merge)"; init_rand(); test_proto_split_merge_gen(4, false, 1.); } END_TEST START_TEST(test_proto_split_merge_lossy) { if (deterministic_tests()) return; gu_conf_self_tstamp_on(); log_info << "START (split_merge_lossy)"; init_rand(); test_proto_split_merge_gen(4, 
false, .9); } END_TEST START_TEST(test_proto_split_merge_w_user_msg) { gu_conf_self_tstamp_on(); log_info << "START (split_merge_w_user_msg)"; init_rand(); test_proto_split_merge_gen(4, true, 1.); } END_TEST START_TEST(test_proto_split_merge_lossy_w_user_msg) { if (deterministic_tests()) return; gu_conf_self_tstamp_on(); log_info << "START (split_merge_lossy_w_user_msg)"; init_rand(); test_proto_split_merge_gen(4, true, .9); } END_TEST START_TEST(test_proto_stop_cont) { log_info << "START"; init_rand(); const size_t n_nodes(4); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 0; j < n_nodes; ++j) { if (j != i) { dn[j]->close(dn[i]->uuid()); } } set_cvi(dn, 0, n_nodes - 1, view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); view_seq += 2; } gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_arbitrate) { log_info << "START"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT0.5S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; dn[0]->close(dn[1]->uuid()); dn[1]->close(dn[0]->uuid()); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[2]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), view_seq)); gu_trace(prop.propagate_until_cvi(true)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_proto_split_two) { log_info << "START"; const size_t n_nodes(2); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t view_seq = n_nodes + 1; dn[0]->close(dn[1]->uuid()); dn[1]->close(dn[0]->uuid()); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), view_seq)); gu_trace(prop.propagate_until_cvi(true)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[0]->uuid(), view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_aggreg) { log_info << "START"; const size_t n_nodes(2); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.31S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(send_n(dn[i], 8)); } gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_538) { gu_conf_self_tstamp_on(); log_info << "START (test_trac_538)"; init_rand(); const size_t n_nodes(5); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT2S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes - 1; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes - 1)); gu_trace(join_node(&prop, dn[n_nodes - 1], false)); for (size_t i = 1; i <= n_nodes; ++i) { if (i != n_nodes - 1) { prop.set_loss(i, n_nodes - 1, 0); prop.set_loss(n_nodes - 1, i, 0); } } set_cvi(dn, 0, n_nodes - 1, max_view_seq + 1); dn[n_nodes - 2]->set_cvi(ViewId(V_REG, n_nodes - 1, max_view_seq + 1)); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_552) { log_info << "START (trac_552)"; init_rand(); const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT15S"); const string inactive_timeout("PT30S"); const string retrans_period("PT1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { for (size_t j = 1; j < i + 1; ++j) { prop.set_loss(i + 1, j, 0.9); prop.set_loss(j, i + 1, 0.9); } } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); for (size_t j = 0; j < n_nodes; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[0]->set_cvi(V_REG); dn[1]->set_cvi(V_REG); set_cvi(dn, 2, n_nodes - 1, max_view_seq + 1); dn[0]->close(); dn[1]->close(); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_607) { gu_conf_self_tstamp_on(); log_info << "START (trac_607)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); dn[0]->set_cvi(V_REG); dn[0]->close(); while (evs_from_dummy(dn[1])->state() != Proto::S_INSTALL) { prop.propagate_n(1); } // this used to cause exception: // Forbidden state transition: INSTALL -> LEAVING (FATAL) dn[1]->close(); // expected behavior: // dn[1], dn[2] reach S_OPERATIONAL and then dn[1] leaves gracefully set_cvi(dn, 1, n_nodes - 1, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); max_view_seq = get_max_view_seq(dn, 0, n_nodes); dn[1]->set_cvi(V_REG); set_cvi(dn, 2, 2, max_view_seq + 1); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_724) { gu_conf_self_tstamp_on(); log_info << "START (trac_724)"; init_rand(); const size_t n_nodes(2); PropagationMatrix prop; vector dn; Protolay::sync_param_cb_t sync_param_cb; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } // Slightly asymmetric settings and evs.use_aggregate=false to // allow completion window to grow over 0xff. 
Proto* evs0(evs_from_dummy(dn[0])); bool ret(evs0->set_param("evs.use_aggregate", "false", sync_param_cb)); ck_assert(ret == true); ret = evs0->set_param("evs.send_window", "1024", sync_param_cb); ck_assert(ret == true); ret = evs0->set_param("evs.user_send_window", "515", sync_param_cb); Proto* evs1(evs_from_dummy(dn[1])); ret = evs1->set_param("evs.use_aggregate", "false", sync_param_cb); ck_assert(ret == true); ret = evs1->set_param("evs.send_window", "1024", sync_param_cb); ck_assert(ret == true); ret = evs1->set_param("evs.user_send_window", "512", sync_param_cb); prop.set_loss(1, 2, 0.); for (size_t i(0); i < 256; ++i) { dn[0]->send(); dn[0]->send(); dn[1]->send(); gu_trace(prop.propagate_until_empty()); } dn[0]->send(); prop.set_loss(1, 2, 1.); dn[0]->send(); gu_trace(prop.propagate_until_empty()); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_760) { gu_conf_self_tstamp_on(); log_info << "START (trac_760)"; init_rand(); const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(send_n(dn[i], 2)); } gu_trace(prop.propagate_until_empty()); uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); gu_trace(send_n(dn[0], 1)); gu_trace(send_n(dn[1], 1)); // gu_trace(send_n(dn[2], 1)); set_cvi(dn, 0, 1, max_view_seq + 1); dn[2]->set_cvi(V_REG); dn[2]->close(); Proto* evs0(evs_from_dummy(dn[0])); Proto* evs1(evs_from_dummy(dn[1])); while (evs1->state() != Proto::S_GATHER && evs0->state() != Proto::S_GATHER) { gu_trace(prop.propagate_n(1)); } dn[1]->close(); gu_trace(prop.propagate_until_cvi(true)); gu_trace(check_trace(dn)); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_41) { gu_conf_self_tstamp_on(); log_info << "START (gh_41)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? 
true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } // Generate partitioning so that the node with smallest UUID // creates singleton view log_info << "partition"; prop.set_loss(1, 2, 0.); prop.set_loss(2, 1, 0.); prop.set_loss(1, 3, 0.); prop.set_loss(3, 1, 0.); uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), max_view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); // Merge groups and make node 1 leave so that nodes 2 and 3 see // leave message from unknown origin log_info << "merge"; prop.set_loss(1, 2, 1.); prop.set_loss(2, 1, 1.); prop.set_loss(1, 3, 1.); prop.set_loss(3, 1, 1.); // Send message so that nodes 2 and 3 shift to GATHER. This must be done // because LEAVE message is ignored in handle_foreign() dn[0]->send(); dn[0]->close(); dn[0]->set_cvi(V_REG); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 2)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 2)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_37) { gu_conf_self_tstamp_on(); log_info << "START (gh_37)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); // node 0 is gonna to leave for(size_t i = 2; i <= n_nodes; i++) { // leaving node(LN) is able to send messages to remaining nodes. 
// prop.set_loss(1, i, 0.); // but remaining nodes(RNS) won't be able to ack these messages. prop.set_loss(i, 1, 0.); // so RNS aru_seq are the same and higher than LN aru_seq. } // LN ss=-1, ir=[2,1] // RNS ss=1, ir=[2,1] dn[0]->send(); dn[0]->send(); dn[0]->close(); dn[0]->set_cvi(V_REG); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_40) { gu_conf_self_tstamp_on(); log_info << "START (gh_40)"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node(i, 0, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } uint32_t max_view_seq(get_max_view_seq(dn, 0, n_nodes)); // ss=0, ir=[1,0]; dn[0]->send(); gu_trace(prop.propagate_until_empty()); log_info << "gh_40 all got operational state"; // cut dn[0] from dn[1] and dn[2]. for (size_t i = 2; i <= n_nodes; ++i) { prop.set_loss(1, i, 0.); prop.set_loss(i, 1, 0.); } // ss=0, ir=[2,1]; // dn[1] send msg(seq=1) dn[1]->send(); Proto* evs1 = evs_from_dummy(dn[1]); Proto* evs2 = evs_from_dummy(dn[2]); ck_assert(evs1->state() == Proto::S_OPERATIONAL); ck_assert(evs2->state() == Proto::S_OPERATIONAL); evs1->set_inactive(dn[0]->uuid()); evs2->set_inactive(dn[0]->uuid()); evs1->check_inactive(); evs2->check_inactive(); ck_assert(evs1->state() == Proto::S_GATHER); ck_assert(evs2->state() == Proto::S_GATHER); // Advance clock to get over join message rate limiting. 
gu::datetime::SimClock::inc_time(100*gu::datetime::MSec); while(!(evs1->state() == Proto::S_GATHER && evs1->is_install_message())) { gu_trace(prop.propagate_n(1)); } // dn[0] comes back. // here we have to set message F_RETRANS // otherwise handle_msg ignores this msg. // @todo:why? // dn[0] ack dn[1] msg(seq=1) with flags F_RETRANS. Datagram dg1 = dn[0]->create_datagram(); UserMessage msg1(0, dn[0]->uuid(), ViewId(V_REG, dn[0]->uuid(), max_view_seq), 1, 0, 0, O_DROP, 1, 0xff, Message::F_RETRANS); // dn[0] msg(seq=2) leak into dn[1] input_map. Datagram dg2 = dn[0]->create_datagram(); UserMessage msg2(0, dn[0]->uuid(), ViewId(V_REG, dn[0]->uuid(), max_view_seq), 2, 0, 0, O_SAFE, 2, 0xff, Message::F_RETRANS); // so for dn[1] // input_map: ss=0, ir=[3,2] // install message: ss=0, ir=[2,1] // seq 1 = O_SAFE message.(initiated by self) // seq 2 = O_DROP message.(complete_user) push_header(msg1, dg1); evs1->handle_up(0, dg1, ProtoUpMeta(dn[0]->uuid())); push_header(msg2, dg2); log_info << "evs1 handle msg " << msg2; log_info << "before handle msg: " << *evs1; evs1->handle_up(0, dg2, ProtoUpMeta(dn[0]->uuid())); log_info << "after handle msg: " << *evs1; dn[0]->set_cvi(ViewId(V_REG, dn[0]->uuid(), max_view_seq + 1)); dn[1]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); dn[2]->set_cvi(ViewId(V_REG, dn[1]->uuid(), max_view_seq + 1)); prop.propagate_until_cvi(true); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gh_100) { log_info << "START (test_gh_100)"; gu::Config conf; mark_point(); gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); conf.set("evs.info_log_mask", "0x3"); conf.set("evs.debug_log_mask", "0xa0"); UUID uuid1(1), uuid2(2); DummyTransport t1(uuid1), t2(uuid2); mark_point(); DummyUser u1(conf), u2(conf); mark_point(); Proto p1(conf, uuid1, 0, gu::URI("evs://"), 10000, 0); // Start p2 view seqno from higher value than p1 View p2_rst_view(0, ViewId(V_REG, uuid2, 3)); Proto p2(conf, uuid2, 0, 
gu::URI("evs://"), 10000, &p2_rst_view); gcomm::connect(&t1, &p1); gcomm::connect(&p1, &u1); gcomm::connect(&t2, &p2); gcomm::connect(&p2, &u2); single_join(&t1, &p1); // The following is from double_join(). Process messages until // install message is generated. After that handle install timer // on p1 and verify that the newly generated install message has // greater install view id seqno than the first one. std::unique_ptr jm; std::unique_ptr im; std::unique_ptr im2; std::unique_ptr gm; std::unique_ptr gm2; std::unique_ptr msg; Datagram* rb; // Initial states check p2.shift_to(Proto::S_JOINING); ck_assert(p1.state() == Proto::S_OPERATIONAL); ck_assert(p2.state() == Proto::S_JOINING); // Send join message, don't self handle immediately // Expected output: one join message p2.send_join(false); ck_assert(p2.state() == Proto::S_JOINING); rb = get_msg(&t2, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(&t2, msg); ck_assert(rb == 0); // Handle node 2's join on node 1 // Expected output: shift to S_GATHER and one join message p1.handle_msg(*jm); ck_assert(p1.state() == Proto::S_GATHER); rb = get_msg(&t1, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(&t1, msg); ck_assert(rb == 0); // Handle node 1's join on node 2 // Expected output: shift to S_GATHER and one join message p2.handle_msg(*jm); ck_assert(p2.state() == Proto::S_GATHER); rb = get_msg(&t2, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(&t2, msg); ck_assert(rb == 0); // Handle node 2's join on node 1 // Expected output: Install and commit gap messages, state stays in S_GATHER p1.handle_msg(*jm); ck_assert(p1.state() == Proto::S_GATHER); rb = get_msg(&t1, im); ck_assert(rb != 0); ck_assert(im->type() == Message::EVS_T_INSTALL); rb = get_msg(&t1, gm); ck_assert(rb != 0); ck_assert(gm->type() == Message::EVS_T_GAP); ck_assert((gm->flags() & Message::F_COMMIT) != 0); rb = get_msg(&t1, msg); ck_assert(rb 
== 0); // Handle timers to to generate shift to GATHER p1.handle_inactivity_timer(); p1.handle_install_timer(); rb = get_msg(&t1, jm); ck_assert(rb != 0); ck_assert(jm->type() == Message::EVS_T_JOIN); rb = get_msg(&t1, im2); ck_assert(rb != 0); ck_assert(im2->type() == Message::EVS_T_INSTALL); ck_assert(im2->install_view_id().seq() > im->install_view_id().seq()); gcomm::Datagram* tmp; while ((tmp = t1.out())) delete tmp; while ((tmp = t2.out())) delete tmp; } END_TEST START_TEST(test_evs_protocol_upgrade) { log_info << "START (test_evs_protocol_upgrade)"; PropagationMatrix prop; vector dn; uint32_t view_seq(0); for (int i(0); i <= GCOMM_PROTOCOL_MAX_VERSION; ++i) { gu_trace(dn.push_back(create_dummy_node(i + 1, i))); gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, view_seq + 1); gu_trace(prop.propagate_until_cvi(false)); ++view_seq; for (int j(0); j <= i; ++j) { ck_assert(evs_from_dummy(dn[j])->current_view().version() == 0); gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } } for (int i(0); i < GCOMM_PROTOCOL_MAX_VERSION; ++i) { for (int j(i); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_REG); set_cvi(dn, i + 1, GCOMM_PROTOCOL_MAX_VERSION, view_seq); gu_trace(prop.propagate_until_cvi(true)); ++view_seq; for (int j(i + 1); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); } ck_assert(evs_from_dummy(dn[GCOMM_PROTOCOL_MAX_VERSION])->current_view().version() == GCOMM_PROTOCOL_MAX_VERSION); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_gal_521) { // Test the case where two nodes exhaust their user send windows // simultaneously. 
log_info << "Start test_gal_521"; std::vector dn; Protolay::sync_param_cb_t sync_param_cb; dn.push_back(create_dummy_node(1, 0)); dn.push_back(create_dummy_node(2, 0)); gcomm::evs::Proto *evs1(evs_from_dummy(dn[0])); DummyTransport* t1(transport_from_dummy(dn[0])); t1->set_queueing(true); gcomm::evs::Proto *evs2(evs_from_dummy(dn[1])); DummyTransport* t2(transport_from_dummy(dn[1])); t2->set_queueing(true); single_join(t1, evs1); double_join(t1, evs1, t2, evs2); ck_assert(t1->empty() == true); ck_assert(t2->empty() == true); // Adjust send windows to allow sending only single user generated // message at the time evs1->set_param(gcomm::Conf::EvsUserSendWindow, "1", sync_param_cb); evs1->set_param(gcomm::Conf::EvsSendWindow, "1", sync_param_cb); evs2->set_param(gcomm::Conf::EvsUserSendWindow, "1", sync_param_cb); evs2->set_param(gcomm::Conf::EvsSendWindow, "1", sync_param_cb); // Make both sides send two messages without communicating with // each other. This will place one user message into transport // queue and one into send queue for both nodes. send_n(dn[0], 2); ck_assert(t1->empty() == false); send_n(dn[1], 2); ck_assert(t2->empty() == false); Datagram *d1; std::unique_ptr um1; ck_assert((d1 = get_msg(t1, um1, false)) != 0); ck_assert(um1->type() == Message::EVS_T_USER); ck_assert(t1->empty() == true); Datagram *d2; std::unique_ptr um2; ck_assert((d2 = get_msg(t2, um2, false)) != 0); ck_assert(um2->type() == Message::EVS_T_USER); ck_assert(t2->empty() == true); // Both of the nodes handle each other's messages. Now due to // send_window == 1 they are not allowed to send the second // message since safe_seq has not been incremented. Instead, they // must emit gap messages to make safe_seq to progress. 
evs1->handle_up(0, *d2, ProtoUpMeta(dn[1]->uuid())); delete d2; std::unique_ptr gm1; ck_assert(get_msg(t1, gm1) != 0); ck_assert(gm1->type() == Message::EVS_T_GAP); ck_assert(t1->empty() == true); evs2->handle_up(0, *d1, ProtoUpMeta(dn[0]->uuid())); delete d1; std::unique_ptr gm2; ck_assert(get_msg(t2, gm2) != 0); ck_assert(gm2->type() == Message::EVS_T_GAP); ck_assert(t2->empty() == true); // Handle gap messages. The safe_seq is now incremented so the // second user messages are now sent from output queue. evs1->handle_msg(*gm2); ck_assert((d1 = get_msg(t1, um1, false)) != 0); ck_assert(um1->type() == Message::EVS_T_USER); ck_assert(t1->empty() == true); evs2->handle_msg(*gm1); ck_assert((d2 = get_msg(t2, um2, false)) != 0); ck_assert(um2->type() == Message::EVS_T_USER); ck_assert(t2->empty() == true); // Handle user messages. Each node should now emit gap // because the output queue is empty. evs1->handle_up(0, *d2, ProtoUpMeta(dn[1]->uuid())); delete d2; ck_assert(get_msg(t1, gm1) != 0); ck_assert(gm1->type() == Message::EVS_T_GAP); ck_assert(t1->empty() == true); evs2->handle_up(0, *d1, ProtoUpMeta(dn[0]->uuid())); delete d1; ck_assert(get_msg(t2, gm2) != 0); ck_assert(gm2->type() == Message::EVS_T_GAP); ck_assert(t2->empty() == true); // Handle gap messages. No further messages should be emitted // since both user messages have been delivered, there are // no pending user messages in the output queue and no timers // have been expired. 
evs1->handle_msg(*gm2); ck_assert((d1 = get_msg(t1, um1, false)) == 0); evs2->handle_msg(*gm1); ck_assert((d2 = get_msg(t2, um2, false)) == 0); std::for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST struct TwoNodeFixture { struct Configs { Configs() : conf1() , conf2() { gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); gcomm::Conf::register_params(conf2); } gu::Config conf1; // Config for node1 gu::Config conf2; // Config for node2 }; TwoNodeFixture() : conf() , uuid1(1) , uuid2(2) , tr1(uuid1) , tr2(uuid2) , evs1(conf.conf1, uuid1, 0) , evs2(conf.conf2, uuid2, 0) , top1(conf.conf1) , top2(conf.conf2) { gcomm::connect(&tr1, &evs1); gcomm::connect(&evs1, &top1); gcomm::connect(&tr2, &evs2); gcomm::connect(&evs2, &top2); single_join(&tr1, &evs1); double_join(&tr1, &evs1, &tr2, &evs2); } Configs conf; const gcomm::UUID uuid1; // UUID of node1 const gcomm::UUID uuid2; // UUID if node2 DummyTransport tr1; // Transport for node1 DummyTransport tr2; // Transport for node2 gcomm::evs::Proto evs1; // Proto for node1 gcomm::evs::Proto evs2; // Proto for node2 DummyUser top1; // Top level layer for node1 DummyUser top2; // Top level layer for node2 }; // Verify that gap messages are rate limited when a node receives // several out of order messages. START_TEST(test_gap_rate_limit) { log_info << "START test_gap_rate_limit"; // Start time from 1 sec to avoid hitting gap rate limit for the first // gap message. gu::datetime::SimClock::init(gu::datetime::Sec); gu_log_max_level = GU_LOG_DEBUG; TwoNodeFixture f; gcomm::Protolay::sync_param_cb_t spcb; // Increase evs1 send windows to allow generating out of order messages. f.evs1.set_param("evs.send_window", "4", spcb); f.evs1.set_param("evs.user_send_window", "4", spcb); // Print all debug logging on node2 for test troubleshooting. 
f.evs2.set_param("evs.debug_log_mask", "0xffff", spcb); f.evs2.set_param("evs.info_log_mask", "0xff", spcb); char data[1] = { 0 }; gcomm::Datagram dg(gu::SharedBuffer(new gu::Buffer(data, data + 1))); // Generate four messages from node1. The first one is ignored, // the rest are handled by node2 for generating gap messages. f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); gcomm::Datagram* read_dg; std::unique_ptr um1; read_dg = get_msg(&f.tr1, um1); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um2; read_dg = get_msg(&f.tr1, um2); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um3; read_dg = get_msg(&f.tr1, um3); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um4; read_dg = get_msg(&f.tr1, um4); ck_assert(read_dg != 0); // Make node2 handle an out of order message and verify that gap is emitted f.evs2.handle_msg(*um2); std::unique_ptr gm1; read_dg = get_msg(&f.tr2, gm1); ck_assert(read_dg != 0); ck_assert(gm1->type() == gcomm::evs::Message::EVS_T_GAP); ck_assert(gm1->range_uuid() == f.uuid1); ck_assert(gm1->range().lu() == 0); ck_assert(gm1->range().hs() == 0); // The node2 will also send an user message to complete the sequence // number. Consume it. std::unique_ptr comp_um1; read_dg = get_msg(&f.tr2, comp_um1); ck_assert(read_dg != 0); ck_assert(comp_um1->type() == gcomm::evs::Message::EVS_T_USER); ck_assert(comp_um1->seq() + comp_um1->seq_range() == 1); // No further messages should be emitted read_dg = get_msg(&f.tr2, comp_um1); ck_assert(read_dg == 0); // Handle the second out of order message, gap should not be emitted. // There will be a next user message which completes the um3. 
f.evs2.handle_msg(*um3); std::unique_ptr comp_um2; read_dg = get_msg(&f.tr2, comp_um2); ck_assert(read_dg != 0); ck_assert(comp_um2->type() == gcomm::evs::Message::EVS_T_USER); ck_assert(comp_um2->seq() + comp_um2->seq_range() == 2); // There should not be any more gap messages. read_dg = get_msg(&f.tr2, gm1); ck_assert(read_dg == 0); // Move the clock forwards and handle the fourth message, gap should // now emitted. gu::datetime::SimClock::inc_time(100*gu::datetime::MSec); std::unique_ptr gm2; f.evs2.handle_msg(*um4); read_dg = get_msg(&f.tr2, gm2); ck_assert(read_dg != 0); ck_assert(gm2->type() == gcomm::evs::Message::EVS_T_GAP); ck_assert(gm2->range().lu() == 0); ck_assert(gm2->range().hs() == 0); std::unique_ptr comp_u4; read_dg = get_msg(&f.tr2, comp_u4); ck_assert(read_dg != 0); ck_assert(comp_u4->type() == gcomm::evs::Message::EVS_T_USER); log_info << "END test_gap_rate_limit"; } END_TEST // Verify that gap messages are rate limited when the liveness check finds // delayed node. START_TEST(test_gap_rate_limit_delayed) { log_info << "START test_gap_rate_limit_delayed"; // Start time from 1 sec to avoid hitting gap rate limit for the first // gap message. gu::datetime::SimClock::init(gu::datetime::Sec); gu_log_max_level = GU_LOG_DEBUG; TwoNodeFixture f; gcomm::Protolay::sync_param_cb_t spcb; // Increase evs1 send windows to allow generating out of order messages. f.evs1.set_param("evs.send_window", "4", spcb); f.evs1.set_param("evs.user_send_window", "4", spcb); // Print all debug logging on node2 for test troubleshooting. f.evs2.set_param("evs.debug_log_mask", "0xffff", spcb); f.evs2.set_param("evs.info_log_mask", "0xff", spcb); // The retransmission request is done for delayed only if // auto evict is on. f.evs2.set_param("evs.auto_evict", "1", spcb); const char data[1] = { 0 }; gcomm::Datagram dg(gu::SharedBuffer(new gu::Buffer(data, data + 1))); // Generate four messages from node1. 
The first one is ignored, // the rest are handled by node2 for generating gap messages. f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); gcomm::Datagram* read_dg; std::unique_ptr um1; read_dg = get_msg(&f.tr1, um1); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um2; read_dg = get_msg(&f.tr1, um2); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um3; read_dg = get_msg(&f.tr1, um3); ck_assert(read_dg != 0); f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)); std::unique_ptr um4; read_dg = get_msg(&f.tr1, um4); ck_assert(read_dg != 0); // Make node2 handle an out of order message and verify that gap is emitted f.evs2.handle_msg(*um2); std::unique_ptr gm1; read_dg = get_msg(&f.tr2, gm1); ck_assert(read_dg != 0); ck_assert(gm1->type() == gcomm::evs::Message::EVS_T_GAP); ck_assert(gm1->range_uuid() == f.uuid1); ck_assert(gm1->range().lu() == 0); ck_assert(gm1->range().hs() == 0); // The node2 will also send an user message to complete the sequence // number. Consume it. std::unique_ptr comp_um1; read_dg = get_msg(&f.tr2, comp_um1); ck_assert(read_dg != 0); ck_assert(comp_um1->type() == gcomm::evs::Message::EVS_T_USER); ck_assert(comp_um1->seq() + comp_um1->seq_range() == 1); // No further messages should be emitted read_dg = get_msg(&f.tr2, comp_um1); ck_assert(read_dg == 0); // Move time forwards in 1 sec interval and make inactivity check // in between. No gap messages should be emitted. gu::datetime::SimClock::inc_time(gu::datetime::Sec); f.evs2.handle_inactivity_timer(); std::unique_ptr gm_discard; read_dg = get_msg(&f.tr2, gm_discard); ck_assert(read_dg == 0); // The clock is now advanced over retrans_period + delay margin. Next // call to handle_inactivity_timer() should fire the check. Gap message // is emitted. 
gu::datetime::SimClock::inc_time(gu::datetime::Sec); f.evs2.handle_inactivity_timer(); read_dg = get_msg(&f.tr2, gm1); ck_assert(read_dg != 0); ck_assert(gm1->type() == gcomm::evs::Message::EVS_T_GAP); // Now call handle_inactivity_timer() again, gap message should not // be emitted due to rate limit. // Galera 4 will run with evs protocol version 1 and will emit // delayed list at this point. f.evs2.handle_inactivity_timer(); std::unique_ptr dm; read_dg = get_msg(&f.tr2, dm); ck_assert(read_dg != 0); ck_assert(dm->type() == gcomm::evs::Message::EVS_T_DELAYED_LIST); read_dg = get_msg(&f.tr2, gm_discard); ck_assert(read_dg == 0); // Move clock forward 100msec, new gap should be now emitted. gu::datetime::SimClock::inc_time(100*gu::datetime::MSec); f.evs2.handle_inactivity_timer(); std::unique_ptr gm2; read_dg = get_msg(&f.tr2, gm2); ck_assert(read_dg != 0); ck_assert(gm2->type() == gcomm::evs::Message::EVS_T_GAP); log_info << "END test_gap_rate_limit_delayed"; gcomm::Datagram* tmp; while ((tmp = f.tr1.out())) delete tmp; while ((tmp = f.tr2.out())) delete tmp; } END_TEST START_TEST(test_out_queue_limit) { TwoNodeFixture f; std::vector data(1 << 15); gcomm::Datagram dg(gu::SharedBuffer( new gu::Buffer(data.begin(), data.end()))); // Default user send window is 2 and out queue limit is 1M, // so we can write 2 + 32 messages without blocking. for (size_t i(0); i < 34; ++i) { ck_assert(f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)) == 0); } // The next write should fill the out_queue and return EAGAIN const char small_data[1] = { 0 }; dg = gu::SharedBuffer(new gu::Buffer(small_data, small_data + 1)); ck_assert(f.evs1.handle_down(dg, ProtoDownMeta(O_SAFE)) == EAGAIN); gcomm::Datagram* tmp; while ((tmp = f.tr1.out())) delete tmp; } END_TEST // Test outline: The representative of the group is isolated out and a // new instance is joined with uuid with incremented incarnation. // The following install message should have also the old incarnation // present. 
This is checked by sending an user message from old // representative incarnation before isolation. If the old incarnation // is not present in the install message, the delivery of the user message // in transitional configuration will throw an exception. START_TEST(test_representative_incarnation_change) { log_info << "START test_representative_incarnation_change"; const size_t n_nodes(3); PropagationMatrix prop; vector dn; const int protocol_version(1); const string suspect_timeout("PT0.5S"); const string inactive_timeout("PT1S"); const string retrans_period("PT0.1S"); for (size_t i = 1; i <= n_nodes; ++i) { gu_trace(dn.push_back( create_dummy_node_with_uuid( i, gcomm::UUID(i), protocol_version, suspect_timeout, inactive_timeout, retrans_period))); } for (size_t i = 0; i < n_nodes; ++i) { gu_trace(join_node(&prop, dn[i], i == 0 ? true : false)); set_cvi(dn, 0, i, i + 1); gu_trace(prop.propagate_until_cvi(false)); } prop.propagate_until_empty(); // Send a message from the representative and propagate enough messages // to make sure that other nodes received the message but didn't deliver // yet. dn[0]->send(); prop.propagate_n(3); // Isolate the representative. for (size_t i = 2; i <= n_nodes; ++i) { prop.set_loss(1, i, 0.); prop.set_loss(i, 1, 0.); } // Shift clock and handle timers to bring other nodes into gather // state. gu::datetime::SimClock::inc_time(300 * gu::datetime::MSec); evs_from_dummy(dn[1])->handle_timers(); evs_from_dummy(dn[2])->handle_timers(); prop.propagate_until_empty(); gu::datetime::SimClock::inc_time(300 * gu::datetime::MSec); evs_from_dummy(dn[1])->handle_timers(); evs_from_dummy(dn[2])->handle_timers(); ck_assert(evs_from_dummy(dn[1])->state() == gcomm::evs::Proto::S_GATHER); ck_assert(evs_from_dummy(dn[2])->state() == gcomm::evs::Proto::S_GATHER); // Create a new instance with old representative uuid with incarnation // incremented. Keep it isolated from old representative. 
gcomm::UUID uuid_new_incarnation(evs_from_dummy(dn[0])->uuid()); uuid_new_incarnation.increment_incarnation(); dn.push_back(create_dummy_node_with_uuid(4, uuid_new_incarnation, protocol_version, suspect_timeout, inactive_timeout, retrans_period)); join_node(&prop, dn[3], false); prop.set_loss(1, 4, 0.); prop.set_loss(4, 1, 0.); prop.propagate_until_empty(); std::for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST Suite* evs2_suite() { Suite* s = suite_create("gcomm::evs"); TCase* tc; tc = tcase_create("test_range"); tcase_add_test(tc, test_range); suite_add_tcase(s, tc); tc = tcase_create("test_message"); tcase_add_test(tc, test_message); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_insert"); tcase_add_test(tc, test_input_map_insert); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_find"); tcase_add_test(tc, test_input_map_find); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_safety"); tcase_add_test(tc, test_input_map_safety); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_erase"); tcase_add_test(tc, test_input_map_erase); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_overwrap"); tcase_add_test(tc, test_input_map_overwrap); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_random_insert"); tcase_add_test(tc, test_input_map_random_insert); suite_add_tcase(s, tc); tc = tcase_create("test_input_map_gap_range_list"); tcase_add_test(tc, test_input_map_gap_range_list); suite_add_tcase(s, tc); tc = tcase_create("test_proto_single_join"); tcase_add_test(tc, test_proto_single_join); suite_add_tcase(s, tc); tc = tcase_create("test_proto_double_join"); tcase_add_test(tc, test_proto_double_join); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n"); tcase_add_test(tc, test_proto_join_n); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n_w_user_msg"); tcase_add_test(tc, test_proto_join_n_w_user_msg); suite_add_tcase(s, tc); tc = 
tcase_create("test_proto_join_n_lossy"); tcase_add_test(tc, test_proto_join_n_lossy); suite_add_tcase(s, tc); tc = tcase_create("test_proto_join_n_lossy_w_user_msg"); tcase_add_test(tc, test_proto_join_n_lossy_w_user_msg); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n"); tcase_add_test(tc, test_proto_leave_n); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_w_user_msg"); tcase_add_test(tc, test_proto_leave_n_w_user_msg); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_lossy"); tcase_add_test(tc, test_proto_leave_n_lossy); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_proto_leave_n_lossy_w_user_msg"); tcase_add_test(tc, test_proto_leave_n_lossy_w_user_msg); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge"); tcase_add_test(tc, test_proto_split_merge); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge_lossy"); tcase_add_test(tc, test_proto_split_merge_lossy); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge_w_user_msg"); tcase_add_test(tc, test_proto_split_merge_w_user_msg); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_merge_lossy_w_user_msg"); tcase_add_test(tc, test_proto_split_merge_lossy_w_user_msg); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("test_proto_stop_cont"); tcase_add_test(tc, test_proto_stop_cont); tcase_set_timeout(tc, 10); suite_add_tcase(s, tc); tc = tcase_create("test_proto_split_two"); tcase_add_test(tc, test_proto_split_two); suite_add_tcase(s, tc); tc = tcase_create("test_aggreg"); tcase_add_test(tc, test_aggreg); suite_add_tcase(s, tc); tc = tcase_create("test_proto_arbitrate"); tcase_add_test(tc, test_proto_arbitrate); suite_add_tcase(s, tc); tc = tcase_create("test_trac_538"); tcase_add_test(tc, test_trac_538); 
tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_552"); tcase_add_test(tc, test_trac_552); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_607"); tcase_add_test(tc, test_trac_607); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_724"); tcase_add_test(tc, test_trac_724); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_trac_760"); tcase_add_test(tc, test_trac_760); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_41"); tcase_add_test(tc, test_gh_41); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_37"); tcase_add_test(tc, test_gh_37); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_gh_40"); tcase_add_test(tc, test_gh_40); tcase_set_timeout(tc, 5); suite_add_tcase(s, tc); tc = tcase_create("test_gh_100"); tcase_add_test(tc, test_gh_100); suite_add_tcase(s, tc); tc = tcase_create("test_evs_protocol_upgrade"); tcase_add_test(tc, test_evs_protocol_upgrade); suite_add_tcase(s, tc); tc = tcase_create("test_gal_521"); tcase_add_test(tc, test_gal_521); suite_add_tcase(s, tc); tc = tcase_create("test_gap_rate_limit"); tcase_add_test(tc, test_gap_rate_limit); suite_add_tcase(s, tc); tc = tcase_create("test_gap_rate_limit_delayed"); tcase_add_test(tc, test_gap_rate_limit_delayed); suite_add_tcase(s, tc); tc = tcase_create("test_out_queue_limit"); tcase_add_test(tc, test_out_queue_limit); suite_add_tcase(s, tc); tc = tcase_create("test_representative_incarnation_change"); tcase_add_test(tc, test_representative_incarnation_change); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_pc.cpp000644 000164 177776 00000367333 15107057155 021057 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2024 Codership Oy */ #include "check_gcomm.hpp" #include "pc_message.hpp" #include "pc_proto.hpp" #include "evs_proto.hpp" #include "check_templ.hpp" #include 
"check_trace.hpp" #include "gcomm/conf.hpp" #include "gu_errno.h" #include "gu_asio.hpp" // gu::ssl_register_params() #include #include #include #include using namespace std; using namespace std::rel_ops; using namespace gu::datetime; using namespace gcomm; using namespace gcomm::pc; using gu::byte_t; using gu::Buffer; using gu::Exception; using gu::URI; using gu::DeleteObject; START_TEST(test_pc_messages) { StateMessage pcs(0); pc::NodeMap& sim(pcs.node_map()); sim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, false, 6, ViewId(V_PRIM, UUID(0, 0), 9), 42, -1))); sim.insert(std::make_pair(UUID(0,0), pc::Node(false, true, false, 88, ViewId(V_PRIM, UUID(0, 0), 3), 472, 0))); sim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, true, 78, ViewId(V_PRIM, UUID(0, 0), 87), 52, 1))); size_t expt_size = 4 // hdr + 4 // seq + 4 + 3*(UUID::serial_size() + sizeof(uint32_t) + 4 + 20 + 8); // NodeMap check_serialization(pcs, expt_size, StateMessage(-1)); InstallMessage pci(0); pc::NodeMap& iim = pci.node_map(); iim.insert(std::make_pair(UUID(0,0), pc::Node(true, true, true, 6, ViewId(V_PRIM, UUID(0, 0), 9), 42, -1))); iim.insert(std::make_pair(UUID(0,0), pc::Node(false, false, false, 88, ViewId(V_NON_PRIM, UUID(0, 0), 3), 472, 0))); iim.insert(std::make_pair(UUID(0,0), pc::Node(true, false, false, 78, ViewId(V_PRIM, UUID(0, 0), 87), 52, 1))); iim.insert(std::make_pair(UUID(0,0), pc::Node(false, true, true, 457, ViewId(V_NON_PRIM, UUID(0, 0), 37), 56, 0xff))); expt_size = 4 // hdr + 4 // seq + 4 + 4*(UUID::serial_size() + sizeof(uint32_t) + 4 + 20 + 8); // NodeMap check_serialization(pci, expt_size, InstallMessage(-1)); UserMessage pcu(0, 7); pcu.checksum(0xfefe, true); expt_size = 4 + 4; check_serialization(pcu, expt_size, UserMessage(-1, -1U)); ck_assert(pcu.serial_size() % 4 == 0); } END_TEST class PCUser : public Toplay { public: PCUser(gu::Config& conf, const UUID& uuid, DummyTransport *tp, Proto* pc) : Toplay(conf), views_(), uuid_(uuid), tp_(tp), pc_(pc) { 
gcomm::connect(tp_, pc_); gcomm::connect(pc_, this); } const UUID& uuid() const { return uuid_; } DummyTransport* tp() { return tp_; } Proto* pc() { return pc_; } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (um.has_view() == true) { const View& view(um.view()); log_info << view; ck_assert(view.type() == V_PRIM || view.type() == V_NON_PRIM); views_.push_back(View(view)); } } void send() { byte_t pl[4] = {1, 2, 3, 4}; Buffer buf(pl, pl + sizeof(pl)); Datagram dg(buf); ck_assert(send_down(dg, ProtoDownMeta()) == 0); } private: PCUser(const PCUser&); void operator=(const PCUser&); list views_; UUID uuid_; DummyTransport* tp_; Proto* pc_; }; void get_msg(Datagram* rb, Message* msg, bool release = true) { assert(msg != 0); if (rb == 0) { log_info << "get_msg: (null)"; } else { // assert(rb->get_header().size() == 0 && rb->get_offset() == 0); const byte_t* begin(gcomm::begin(*rb)); const size_t available(gcomm::available(*rb)); ck_assert(msg->unserialize(begin, available, 0) != 0); log_info << "get_msg: " << msg->to_string(); if (release) delete rb; } } void single_boot(int version, PCUser* pu1) { ProtoUpMeta sum1(pu1->uuid()); View vt0(version, ViewId(V_TRANS, pu1->uuid(), 0)); vt0.add_member(pu1->uuid(), 0); ProtoUpMeta um1(UUID::nil(), ViewId(), &vt0); pu1->pc()->connect(true); // pu1->pc()->shift_to(Proto::S_JOINING); pu1->pc()->handle_up(0, Datagram(), um1); ck_assert(pu1->pc()->state() == Proto::S_TRANS); View vr1(version, ViewId(V_REG, pu1->uuid(), 1)); vr1.add_member(pu1->uuid(), 0); ProtoUpMeta um2(UUID::nil(), ViewId(), &vr1); pu1->pc()->handle_up(0, Datagram(), um2); ck_assert(pu1->pc()->state() == Proto::S_STATES_EXCH); Datagram* rb = pu1->tp()->out(); ck_assert(rb != 0); Message sm1; get_msg(rb, &sm1); ck_assert(sm1.type() == Message::PC_T_STATE); ck_assert(sm1.node_map().size() == 1); { const pc::Node& pi1 = pc::NodeMap::value(sm1.node_map().begin()); ck_assert(pi1.prim() == true); ck_assert(pi1.last_prim() == 
ViewId(V_PRIM, pu1->uuid(), 0)); } pu1->pc()->handle_msg(sm1, Datagram(), sum1); ck_assert(pu1->pc()->state() == Proto::S_INSTALL); rb = pu1->tp()->out(); ck_assert(rb != 0); Message im1; get_msg(rb, &im1); ck_assert(im1.type() == Message::PC_T_INSTALL); ck_assert(im1.node_map().size() == 1); { const pc::Node& pi1 = pc::NodeMap::value(im1.node_map().begin()); ck_assert(pi1.prim() == true); ck_assert(pi1.last_prim() == ViewId(V_PRIM, pu1->uuid(), 0)); } pu1->pc()->handle_msg(im1, Datagram(), sum1); ck_assert(pu1->pc()->state() == Proto::S_PRIM); } START_TEST(test_pc_view_changes_single) { log_info << "START (test_pc_view_changes_single)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(0, 0); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); } END_TEST static void double_boot(int version, PCUser* pu1, PCUser* pu2) { ProtoUpMeta pum1(pu1->uuid()); ProtoUpMeta pum2(pu2->uuid()); View t11(version, ViewId(V_TRANS, pu1->pc()->current_view().id())); t11.add_member(pu1->uuid(), 0); pu1->pc()->handle_view(t11); ck_assert(pu1->pc()->state() == Proto::S_TRANS); View t12(version, ViewId(V_TRANS, pu2->uuid(), 0)); t12.add_member(pu2->uuid(), 0); // pu2->pc()->shift_to(Proto::S_JOINING); pu2->pc()->connect(false); pu2->pc()->handle_view(t12); ck_assert(pu2->pc()->state() == Proto::S_TRANS); View r1(version, ViewId(V_REG, pu1->uuid(), pu1->pc()->current_view().id().seq() + 1)); r1.add_member(pu1->uuid(), 0); r1.add_member(pu2->uuid(), 0); pu1->pc()->handle_view(r1); ck_assert(pu1->pc()->state() == Proto::S_STATES_EXCH); pu2->pc()->handle_view(r1); ck_assert(pu2->pc()->state() == Proto::S_STATES_EXCH); Datagram* rb = pu1->tp()->out(); ck_assert(rb != 0); Message sm1; get_msg(rb, &sm1); ck_assert(sm1.type() == Message::PC_T_STATE); rb = pu2->tp()->out(); ck_assert(rb != 0); Message sm2; get_msg(rb, &sm2); ck_assert(sm2.type() == Message::PC_T_STATE); rb = pu1->tp()->out(); 
ck_assert(rb == 0); rb = pu2->tp()->out(); ck_assert(rb == 0); pu1->pc()->handle_msg(sm1, Datagram(), pum1); rb = pu1->tp()->out(); ck_assert(rb == 0); ck_assert(pu1->pc()->state() == Proto::S_STATES_EXCH); pu1->pc()->handle_msg(sm2, Datagram(), pum2); ck_assert(pu1->pc()->state() == Proto::S_INSTALL); pu2->pc()->handle_msg(sm1, Datagram(), pum1); rb = pu2->tp()->out(); ck_assert(rb == 0); ck_assert(pu2->pc()->state() == Proto::S_STATES_EXCH); pu2->pc()->handle_msg(sm2, Datagram(), pum2); ck_assert(pu2->pc()->state() == Proto::S_INSTALL); Message im1; UUID imsrc; if (pu1->uuid() < pu2->uuid()) { rb = pu1->tp()->out(); imsrc = pu1->uuid(); } else { rb = pu2->tp()->out(); imsrc = pu2->uuid(); } ck_assert(rb != 0); get_msg(rb, &im1); ck_assert(im1.type() == Message::PC_T_INSTALL); ck_assert(pu1->tp()->out() == 0); ck_assert(pu2->tp()->out() == 0); ProtoUpMeta ipum(imsrc); pu1->pc()->handle_msg(im1, Datagram(), ipum); ck_assert(pu1->pc()->state() == Proto::S_PRIM); pu2->pc()->handle_msg(im1, Datagram(), ipum); ck_assert(pu2->pc()->state() == Proto::S_PRIM); } // Form PC for three instances. 
static void triple_boot(int version, PCUser* pu1, PCUser* pu2, PCUser* pu3) { ck_assert(pu1->uuid() < pu2->uuid() && pu2->uuid() < pu3->uuid()); // trans views { View tr12(version, ViewId(V_TRANS, pu1->pc()->current_view().id())); tr12.add_member(pu1->uuid(), 0); tr12.add_member(pu2->uuid(), 0); ProtoUpMeta trum12(UUID::nil(), ViewId(), &tr12); pu1->pc()->handle_up(0, Datagram(), trum12); pu2->pc()->handle_up(0, Datagram(), trum12); ck_assert(pu1->pc()->state() == Proto::S_TRANS); ck_assert(pu2->pc()->state() == Proto::S_TRANS); pu3->pc()->connect(false); View tr3(version, ViewId(V_TRANS, pu3->uuid(), 0)); tr3.add_member(pu3->uuid(), 0); ProtoUpMeta trum3(UUID::nil(), ViewId(), &tr3); pu3->pc()->handle_up(0, Datagram(), trum3); ck_assert(pu3->pc()->state() == Proto::S_TRANS); } // reg view { View reg(version, ViewId(V_REG, pu1->uuid(), pu1->pc()->current_view().id().seq() + 1)); reg.add_member(pu1->uuid(), 0); reg.add_member(pu2->uuid(), 0); reg.add_member(pu3->uuid(), 0); ProtoUpMeta pum(UUID::nil(), ViewId(), ®); pu1->pc()->handle_up(0, Datagram(), pum); pu2->pc()->handle_up(0, Datagram(), pum); pu3->pc()->handle_up(0, Datagram(), pum); ck_assert(pu1->pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2->pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3->pc()->state() == Proto::S_STATES_EXCH); } // states exch { Datagram* dg(pu1->tp()->out()); ck_assert(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); delete dg; dg = pu2->tp()->out(); ck_assert(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu2->uuid())); delete dg; dg = pu3->tp()->out(); ck_assert(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu3->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu3->uuid())); pu3->pc()->handle_up(0, *dg, 
ProtoUpMeta(pu3->uuid())); delete dg; ck_assert(pu1->pc()->state() == Proto::S_INSTALL); ck_assert(pu2->pc()->state() == Proto::S_INSTALL); ck_assert(pu3->pc()->state() == Proto::S_INSTALL); } // install { Datagram* dg(pu1->tp()->out()); ck_assert(dg != 0); pu1->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu2->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); pu3->pc()->handle_up(0, *dg, ProtoUpMeta(pu1->uuid())); delete dg; ck_assert(pu1->pc()->state() == Proto::S_PRIM); ck_assert(pu2->pc()->state() == Proto::S_PRIM); ck_assert(pu3->pc()->state() == Proto::S_PRIM); } } START_TEST(test_pc_view_changes_double) { log_info << "START (test_pc_view_changes_double)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); Datagram* rb; View tnp(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tnp.add_member(uuid1, 0); pu1.pc()->handle_view(tnp); ck_assert(pu1.pc()->state() == Proto::S_TRANS); View reg(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0); pu1.pc()->handle_view(reg); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); rb = pu1.tp()->out(); ck_assert(rb != 0); pu1.pc()->handle_up(0, *rb, ProtoUpMeta(uuid1)); ck_assert(pu1.pc()->state() == Proto::S_NON_PRIM); delete rb; View tpv2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tpv2.add_member(uuid2, 0); tpv2.add_left(uuid1, 0); pu2.pc()->handle_view(tpv2); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu2.tp()->out() == 0); View rp2(0, ViewId(V_REG, uuid2, pu1.pc()->current_view().id().seq() + 1)); rp2.add_member(uuid2, 0); rp2.add_left(uuid1, 0); pu2.pc()->handle_view(rp2); ck_assert(pu2.pc()->state() == 
Proto::S_STATES_EXCH); rb = pu2.tp()->out(); ck_assert(rb != 0); Message sm2; get_msg(rb, &sm2); ck_assert(sm2.type() == Message::PC_T_STATE); ck_assert(pu2.tp()->out() == 0); pu2.pc()->handle_msg(sm2, Datagram(), pum2); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); rb = pu2.tp()->out(); ck_assert(rb != 0); Message im2; get_msg(rb, &im2); ck_assert(im2.type() == Message::PC_T_INSTALL); pu2.pc()->handle_msg(im2, Datagram(), pum2); ck_assert(pu2.pc()->state() == Proto::S_PRIM); } END_TEST /* Test that UUID ordering does not matter when starting nodes */ START_TEST(test_pc_view_changes_reverse) { log_info << "START (test_pc_view_changes_reverse)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); double_boot(0, &pu2, &pu1); } END_TEST START_TEST(test_pc_state1) { log_info << "START (test_pc_state1)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> TRANS -> STATES_EXCH -> RTR -> TRANS -> STATES_EXCH -> RTR ->PRIM View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_member(uuid2, 0); pu1.pc()->handle_view(tr1); pu2.pc()->handle_view(tr1); ck_assert(pu1.pc()->state() == Proto::S_TRANS); 
ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu1.tp()->out() == 0); ck_assert(pu2.tp()->out() == 0); View reg2(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg2.add_member(uuid1, 0); reg2.add_member(uuid2, 0); pu1.pc()->handle_view(reg2); pu2.pc()->handle_view(reg2); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_INSTALL); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); View tr2(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr2.add_member(uuid1, 0); tr2.add_member(uuid2, 0); pu1.pc()->handle_view(tr2); pu2.pc()->handle_view(tr2); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); 
ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_INSTALL); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_state2) { log_info << "START (test_pc_state2)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> TRANS -> STATES_EXCH -> TRANS -> STATES_EXCH -> RTR -> PRIM View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_member(uuid2, 0); pu1.pc()->handle_view(tr1); pu2.pc()->handle_view(tr1); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu1.tp()->out() == 0); ck_assert(pu2.tp()->out() == 0); View reg2(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg2.add_member(uuid1, 0); reg2.add_member(uuid2, 0); pu1.pc()->handle_view(reg2); pu2.pc()->handle_view(reg2); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); View 
tr2(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr2.add_member(uuid1, 0); tr2.add_member(uuid2, 0); pu1.pc()->handle_view(tr2); pu2.pc()->handle_view(tr2); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_INSTALL); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_state3) { log_info << "START (test_pc_state3)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); 
UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); // n1: PRIM -> TRANS -> STATES_EXCH -> RTR -> PRIM // n2: JOINING -> STATES_EXCH -> RTR -> PRIM double_boot(0, &pu1, &pu2); ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); // PRIM -> NON_PRIM -> STATES_EXCH -> RTR -> NON_PRIM -> STATES_EXCH -> ... // -> NON_PRIM -> STATES_EXCH -> RTR -> NON_PRIM View tr11(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr11.add_member(uuid1, 0); pu1.pc()->handle_view(tr11); View tr12(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr12.add_member(uuid2, 0); pu2.pc()->handle_view(tr12); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu1.tp()->out() == 0); ck_assert(pu2.tp()->out() == 0); View reg21(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg21.add_member(uuid1, 0); pu1.pc()->handle_view(reg21); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); View reg22(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg22.add_member(uuid2, 0); pu2.pc()->handle_view(reg22); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); Message msg; get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); get_msg(pu2.tp()->out(), &msg); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_NON_PRIM); ck_assert(pu2.pc()->state() == Proto::S_NON_PRIM); View tr21(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr21.add_member(uuid1, 0); pu1.pc()->handle_view(tr21); View tr22(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr22.add_member(uuid2, 0); pu2.pc()->handle_view(tr22); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); 
ck_assert(pu1.tp()->out() == 0); ck_assert(pu2.tp()->out() == 0); View reg3(0, ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg3.add_member(uuid1, 0); reg3.add_member(uuid2, 0); pu1.pc()->handle_view(reg3); pu2.pc()->handle_view(reg3); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu1.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum1); pu2.pc()->handle_msg(msg, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); get_msg(pu2.tp()->out(), &msg); pu1.pc()->handle_msg(msg, Datagram(), pum2); pu2.pc()->handle_msg(msg, Datagram(), pum2); ck_assert(pu1.pc()->state() == Proto::S_INSTALL); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); Message im; if (uuid1 < uuid2) { get_msg(pu1.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum1); pu2.pc()->handle_msg(im, Datagram(), pum1); } else { get_msg(pu2.tp()->out(), &im); pu1.pc()->handle_msg(im, Datagram(), pum2); pu2.pc()->handle_msg(im, Datagram(), pum2); } ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_conflicting_prims) { log_info << "START (test_pc_conflicting_prims)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); pu1.pc()->handle_view(tr1); View tr2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr2.add_member(uuid2, 0); pu2.pc()->handle_view(tr2); View reg(0, ViewId(V_REG, uuid1, tr1.id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 
0); pu1.pc()->handle_view(reg); pu2.pc()->handle_view(reg); Message msg1, msg2; /* First node must discard msg2 and stay in states exch waiting for * trans view */ get_msg(pu1.tp()->out(), &msg1); get_msg(pu2.tp()->out(), &msg2); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); pu1.pc()->handle_msg(msg1, Datagram(), pum1); pu1.pc()->handle_msg(msg2, Datagram(), pum2); /* Second node must abort */ try { pu2.pc()->handle_msg(msg1, Datagram(), pum1); ck_abort_msg("not aborted"); } catch (Exception& e) { log_info << e.what(); } ck_assert(pu1.tp()->out() == 0); View tr3(0, ViewId(V_TRANS, reg.id())); tr3.add_member(uuid1, 0); pu1.pc()->handle_view(tr3); View reg3(0, ViewId(V_REG, uuid1, tr3.id().seq() + 1)); reg3.add_member(uuid1, 0); pu1.pc()->handle_view(reg3); get_msg(pu1.tp()->out(), &msg1); pu1.pc()->handle_msg(msg1, Datagram(), pum1); get_msg(pu1.tp()->out(), &msg1); pu1.pc()->handle_msg(msg1, Datagram(), pum1); ck_assert(pu1.pc()->state() == Proto::S_PRIM); } END_TEST START_TEST(test_pc_conflicting_prims_npvo) { log_info << "START (test_pc_conflicting_npvo)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0, URI("pc://?pc.npvo=true")); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf, uuid2, 0, URI("pc://?pc.npvo=true")); DummyTransport tp2; PCUser pu2(conf, uuid2, &tp2, &pc2); single_boot(0, &pu2); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); pu1.pc()->handle_view(tr1); View tr2(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr2.add_member(uuid2, 0); pu2.pc()->handle_view(tr2); View reg(0, ViewId(V_REG, uuid1, tr1.id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); pu1.pc()->handle_view(reg); pu2.pc()->handle_view(reg); Message msg1, msg2; /* First node must discard msg2 and stay in states exch waiting for * 
trans view */ get_msg(pu1.tp()->out(), &msg1); get_msg(pu2.tp()->out(), &msg2); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); pu1.pc()->handle_msg(msg1, Datagram(), pum1); pu2.pc()->handle_msg(msg1, Datagram(), pum1); /* First node must abort */ try { pu1.pc()->handle_msg(msg2, Datagram(), pum2); ck_abort_msg("not aborted"); } catch (Exception& e) { log_info << e.what(); } ck_assert(pu2.tp()->out() == 0); View tr3(0, ViewId(V_TRANS, reg.id())); tr3.add_member(uuid2, 0); pu2.pc()->handle_view(tr3); View reg3(0, ViewId(V_REG, uuid2, tr3.id().seq() + 1)); reg3.add_member(uuid2, 0); pu2.pc()->handle_view(reg3); get_msg(pu2.tp()->out(), &msg2); pu2.pc()->handle_msg(msg2, Datagram(), pum2); get_msg(pu2.tp()->out(), &msg2); pu2.pc()->handle_msg(msg2, Datagram(), pum2); ck_assert(pu2.pc()->state() == Proto::S_PRIM); } END_TEST static void join_node(PropagationMatrix* p, DummyNode* n, bool first) { log_info << first; gu_trace(p->insert_tp(n)); gu_trace(n->connect(first)); } static void send_n(DummyNode* node, const size_t n) { for (size_t i = 0; i < n; ++i) { gu_trace(node->send()); } } static void set_cvi(vector& nvec, size_t i_begin, size_t i_end, size_t seq, ViewType type) { for (size_t i = i_begin; i <= i_end; ++i) { nvec[i]->set_cvi(ViewId(type, type == V_NON_PRIM ? nvec[0]->uuid() : nvec[i_begin]->uuid(), static_cast(type == V_NON_PRIM ? seq - 1 : seq))); } } struct InitGuConf { explicit InitGuConf(gu::Config& conf) { gcomm::Conf::register_params(conf); } }; static gu::Config& static_gu_conf() { static gu::Config conf; static InitGuConf init(conf); return conf; } static DummyNode* create_dummy_node(size_t idx, int version, const string& suspect_timeout = "PT1H", const string& inactive_timeout = "PT1H", const string& retrans_period = "PT20M", int weight = 1) { gu::Config& gu_conf(static_gu_conf()); gu::ssl_register_params(gu_conf); gcomm::Conf::register_params(gu_conf); const string conf = "evs://?" 
+ Conf::EvsViewForgetTimeout + "=PT1H&" + Conf::EvsInactiveCheckPeriod + "=" + to_string(Period(suspect_timeout)/3) + "&" + Conf::EvsSuspectTimeout + "=" + suspect_timeout + "&" + Conf::EvsInactiveTimeout + "=" + inactive_timeout + "&" + Conf::EvsKeepalivePeriod + "=" + retrans_period + "&" + Conf::EvsJoinRetransPeriod + "=" + retrans_period + "&" + Conf::EvsInstallTimeout + "=" + inactive_timeout + "&" + Conf::PcWeight + "=" + gu::to_string(weight) + "&" + Conf::EvsVersion + "=" + gu::to_string(version) + "&" + Conf::EvsInfoLogMask + "=" + "0x3"; list protos; UUID uuid(static_cast(idx)); protos.push_back(new DummyTransport(uuid, false)); protos.push_back(new evs::Proto(gu_conf, uuid, 0, conf)); protos.push_back(new Proto(gu_conf, uuid, 0, conf)); return new DummyNode(gu_conf, idx, gcomm::UUID(idx), protos); } namespace { gcomm::pc::Proto* pc_from_dummy(DummyNode* dn) { return reinterpret_cast(dn->protos().back()); } } static ViewType view_type(const size_t i_begin, const size_t i_end, const size_t n_nodes) { return (((i_end - i_begin + 1)*2 > n_nodes) ? 
V_PRIM : V_NON_PRIM); } START_TEST(test_pc_split_merge) { log_info << "START (test_pc_split_merge)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; mark_point(); for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } mark_point(); for (size_t i = 1; i < n_nodes; ++i) { for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.split(j + 1, k + 1); } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, i - 1, view_seq, view_type(0, i - 1, n_nodes)); set_cvi(dn, i, n_nodes - 1, view_seq, view_type(i,n_nodes - 1,n_nodes)); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.merge(j + 1, k + 1); } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } mark_point(); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_split_merge_w_user_msg) { log_info << "START (test_pc_split_merge_w_user_msg)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 1; i < n_nodes; ++i) { for (size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } for 
(size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.split(j + 1, k + 1); } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, i - 1, view_seq, view_type(0, i - 1, n_nodes)); set_cvi(dn, i, n_nodes - 1, view_seq, view_type(i, n_nodes - 1, n_nodes)); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } for (size_t j = 0; j < i; ++j) { for (size_t k = i; k < n_nodes; ++k) { prop.merge(j + 1, k + 1); } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); } check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_complete_split_merge) { log_info << "START (test_pc_complete_split_merge)"; size_t n_nodes(5); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.31S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); log_info << "i " << i; gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i = 0; i < 5; ++i) { for (size_t j = 0; j < n_nodes; ++j) { send_n(dn[j], ::rand() % 5); } prop.propagate_n(9 + ::rand() % 5); for (size_t j = 0; j < n_nodes; ++j) { for (size_t k = 0; k < n_nodes; ++k) { if (j != k) { prop.split(j + 1, k + 1); } } } ++view_seq; log_info << "split " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); for (size_t j = 0; j < n_nodes; ++j) { for (size_t k = 0; k < n_nodes; ++k) { if (j != k) { prop.merge(j + 1, k + 1); } } } ++view_seq; log_info << "merge " << i << " view seq " << view_seq; set_cvi(dn, 0, n_nodes - 1, view_seq, V_PRIM); 
gu_trace(prop.propagate_until_cvi(true)); } check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_pc_protocol_upgrade) { log_info << "START (test_pc_protocol_upgrade)"; vector dn; PropagationMatrix prop; uint32_t view_seq(0); for (int i(0); i <= GCOMM_PROTOCOL_MAX_VERSION; ++i) { dn.push_back(create_dummy_node(i + 1, i)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); ++view_seq; for (int j(0); j <= i; ++j) { ck_assert(pc_from_dummy(dn[j])->current_view().version() == 0); gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } } for (int i(0); i < GCOMM_PROTOCOL_MAX_VERSION; ++i) { for (int j(i); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } dn[i]->close(); dn[i]->set_cvi(V_NON_PRIM); set_cvi(dn, i + 1, GCOMM_PROTOCOL_MAX_VERSION, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); ++view_seq; for (int j(i + 1); j <= GCOMM_PROTOCOL_MAX_VERSION; ++j) { gu_trace(send_n(dn[j], 5 + ::rand() % 4)); } gu_trace(prop.propagate_until_empty()); } ck_assert(pc_from_dummy(dn[GCOMM_PROTOCOL_MAX_VERSION])->current_view().version() == GCOMM_PROTOCOL_MAX_VERSION); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_trac_191) { log_info << "START (test_trac_191)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1), uuid2(2), uuid3(3), uuid4(4); Proto p(conf, uuid4, 0); DummyTransport tp(uuid4, true); // gcomm::connect(&tp, &p); PCUser pu(conf, uuid4, &tp, &p); p.shift_to(Proto::S_NON_PRIM); View t0(0, ViewId(V_TRANS, uuid4, 0)); t0.add_member(uuid4, 0); p.handle_view(t0); View r5(0, ViewId(V_REG, uuid2, 5)); r5.add_member(uuid3, 0); r5.add_member(uuid4, 0); p.handle_view(r5); Datagram* dg = tp.out(); ck_assert(dg != 0); Message sm4; get_msg(dg, &sm4); ck_assert(sm4.type() == Message::PC_T_STATE); // Handle first sm from uuid3 StateMessage sm3(0); 
pc::NodeMap& im3(sm3.node_map()); im3.insert_unique(make_pair(uuid1, pc::Node(true, false, false, 254, ViewId(V_PRIM, uuid1, 3), 20))); im3.insert_unique(make_pair(uuid2, pc::Node(true, false, false, 254, ViewId(V_PRIM, uuid1, 3), 20))); im3.insert_unique(make_pair(uuid3, pc::Node(false, false, false, 254, ViewId(V_PRIM, uuid1, 3), 25))); p.handle_msg(sm3, Datagram(), ProtoUpMeta(uuid3)); p.handle_msg(sm4, Datagram(), ProtoUpMeta(uuid4)); } END_TEST START_TEST(test_trac_413) { log_info << "START (test_trac_413)"; class TN : gcomm::Toplay // test node { public: TN(gu::Config conf, const UUID& uuid) : Toplay(conf), p_(conf, uuid, 0), tp_(uuid, true) { gcomm::connect(&tp_, &p_); gcomm::connect(&p_, this); } const UUID& uuid() const { return p_.uuid(); } gcomm::pc::Proto& p() { return p_; } DummyTransport& tp() { return tp_; } void handle_up(const void* id, const Datagram& dg, const gcomm::ProtoUpMeta& um) { // void } private: pc::Proto p_; DummyTransport tp_; }; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); TN n1(conf, 1), n2(conf, 2), n3(conf, 3); // boot to first prim { gcomm::View tr(0, ViewId(V_TRANS, n1.uuid(), 0)); tr.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().connect(true); n1.p().handle_view(tr); Datagram* dg(n1.tp().out()); ck_assert(dg == 0 && n1.p().state() == gcomm::pc::Proto::S_TRANS); gcomm::View reg(0, ViewId(V_REG, n1.uuid(), 1)); reg.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().handle_view(reg); dg = n1.tp().out(); ck_assert(dg != 0 && n1.p().state() == gcomm::pc::Proto::S_STATES_EXCH); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n1.tp().out(); ck_assert(dg != 0 && n1.p().state() == gcomm::pc::Proto::S_INSTALL); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n1.tp().out(); ck_assert(dg == 0 && n1.p().state() == gcomm::pc::Proto::S_PRIM); } // add remaining nodes { gcomm::View tr(0, ViewId(V_TRANS, n1.uuid(), 1)); 
tr.members().insert_unique(std::make_pair(n1.uuid(), 0)); n1.p().handle_view(tr); } { gcomm::View tr(0, ViewId(V_TRANS, n2.uuid(), 0)); tr.members().insert_unique(std::make_pair(n2.uuid(), 0)); n2.p().connect(false); n2.p().handle_view(tr); } { gcomm::View tr(0, ViewId(V_TRANS, n3.uuid(), 0)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().connect(false); n3.p().handle_view(tr); } { gcomm::View reg(0, ViewId(V_REG, n1.uuid(), 2)); reg.members().insert_unique(std::make_pair(n1.uuid(), 0)); reg.members().insert_unique(std::make_pair(n2.uuid(), 0)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n1.p().handle_view(reg); n2.p().handle_view(reg); n3.p().handle_view(reg); Datagram* dg(n1.tp().out()); ck_assert(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; dg = n2.tp().out(); ck_assert(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); delete dg; dg = n3.tp().out(); ck_assert(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); delete dg; dg = n1.tp().out(); ck_assert(dg != 0); n1.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n1.uuid())); delete dg; ck_assert(n1.tp().out() == 0 && n1.p().state() == gcomm::pc::Proto::S_PRIM); ck_assert(n2.tp().out() == 0 && n2.p().state() == gcomm::pc::Proto::S_PRIM); ck_assert(n3.tp().out() == 0 && n3.p().state() == gcomm::pc::Proto::S_PRIM); } mark_point(); // drop n1 from view and deliver only state messages in // the following reg view { gcomm::View tr(0, gcomm::ViewId(V_TRANS, n1.uuid(), 2)); 
tr.members().insert_unique(std::make_pair(n2.uuid(), 0)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n2.p().handle_view(tr); n3.p().handle_view(tr); gcomm::View reg(0, gcomm::ViewId(V_REG, n2.uuid(), 3)); reg.members().insert_unique(std::make_pair(n2.uuid(), 0)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n2.p().handle_view(reg); n3.p().handle_view(reg); Datagram* dg(n2.tp().out()); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n2.uuid())); delete dg; dg = n3.tp().out(); n2.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); n3.p().handle_up(0, *dg, gcomm::ProtoUpMeta(n3.uuid())); delete dg; // Clean up n2 out queue dg = n2.tp().out(); delete dg; } mark_point(); // drop n2 from view and make sure that n3 ends in non-prim { gcomm::View tr(0, gcomm::ViewId(V_TRANS, n2.uuid(), 3)); tr.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().handle_view(tr); ck_assert(n3.tp().out() == 0 && n3.p().state() == gcomm::pc::Proto::S_TRANS); gcomm::View reg(0, gcomm::ViewId(V_REG, n3.uuid(), 4)); reg.members().insert_unique(std::make_pair(n3.uuid(), 0)); n3.p().handle_view(reg); ck_assert(n3.p().state() == gcomm::pc::Proto::S_STATES_EXCH); Datagram* dg(n3.tp().out()); ck_assert(dg != 0); n3.p().handle_up(0, *dg, ProtoUpMeta(n3.uuid())); delete dg; dg = n3.tp().out(); ck_assert_msg(dg == 0 && n3.p().state() == gcomm::pc::Proto::S_NON_PRIM, "%p %d", dg, n3.p().state()); } } END_TEST START_TEST(test_fifo_violation) { log_info << "START (test_fifo_violation)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); assert(pc1.state() == Proto::S_PRIM); pu1.send(); pu1.send(); Datagram* dg1(tp1.out()); ck_assert(dg1 != 0); Datagram* dg2(tp1.out()); ck_assert(dg2 != 0); try { pc1.handle_up(0, *dg2, ProtoUpMeta(uuid1, 
ViewId(), 0, 0xff, O_SAFE)); ck_abort_msg("Exception not thrown"); } catch (Exception& e) { ck_assert(e.get_errno() == ENOTRECOVERABLE); } delete dg1; delete dg2; } END_TEST START_TEST(test_checksum) { log_info << "START (test_checksum)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); conf.set(Conf::PcChecksum, gu::to_string(true)); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf, uuid1, 0); DummyTransport tp1; PCUser pu1(conf, uuid1, &tp1, &pc1); single_boot(0, &pu1); assert(pc1.state() == Proto::S_PRIM); pu1.send(); Datagram* dg(tp1.out()); ck_assert(dg != 0); dg->normalize(); pc1.handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; pu1.send(); dg = tp1.out(); ck_assert(dg != 0); dg->normalize(); *(&dg->payload()[0] + dg->payload().size() - 1) ^= 0x10; try { pc1.handle_up(0, *dg, ProtoUpMeta(uuid1)); ck_abort_msg("Exception not thrown"); } catch (Exception& e) { ck_assert(e.get_errno() == ENOTRECOVERABLE); } delete dg; } END_TEST START_TEST(test_trac_277) { log_info << "START (test_trac_277)"; size_t n_nodes(3); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } log_info << "generate messages"; send_n(dn[0], 1); send_n(dn[1], 1); send_n(dn[2], 1); gu_trace(prop.propagate_until_empty()); log_info << "isolate 3"; prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_PRIM); set_cvi(dn, 2, 2, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "isolate 1 and 2"; ++view_seq; prop.split(1, 2); set_cvi(dn, 0, 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "merge 1 and 
2"; ++view_seq; prop.merge(1, 2); set_cvi(dn, 0, 1, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); log_info << "merge 3"; ++view_seq; prop.merge(1, 3); prop.merge(2, 3); set_cvi(dn, 0, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST // This test checks the case when another node of two node cluster // crashes or becomes completely isolated and prim view of cluster // is established by starting third instance directly in prim mode. START_TEST(test_trac_622_638) { log_info << "START (test_trac_622_638)"; vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; // Create two node cluster and make it split. First node is // considered crashed after split (stay isolated in non-prim). dn.push_back(create_dummy_node(1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[0], true)); set_cvi(dn, 0, 0, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); dn.push_back(create_dummy_node(2, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[1], false)); set_cvi(dn, 0, 1, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); log_info << "generate messages"; send_n(dn[0], 1); send_n(dn[1], 1); gu_trace(prop.propagate_until_empty()); log_info << "isolate 1 and 2"; prop.split(1, 2); ++view_seq; set_cvi(dn, 0, 0, view_seq, V_NON_PRIM); set_cvi(dn, 1, 1, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); // Add third node which will be connected with node 2. This will // be started with prim status. 
dn.push_back(create_dummy_node(3, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[2], true)); prop.split(1, 3); // avoid 1 <-> 3 communication ++view_seq; set_cvi(dn, 1, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); check_trace(dn); for_each(dn.begin(), dn.end(), DeleteObject()); } END_TEST START_TEST(test_weighted_quorum) { log_info << "START (test_weighted_quorum)"; size_t n_nodes(3); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period, i)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } for (size_t i(0); i < n_nodes; ++i) { int weight(pc_from_dummy(dn[i])->cluster_weight()); ck_assert_msg(weight == 3, "index: %zu weight: %d", i, weight); } // split node 3 (weight 2) out, node 3 should remain in prim while // nodes 1 and 2 (weights 0 + 1 = 1) should end up in non-prim prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_NON_PRIM); set_cvi(dn, 2, 2, view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(true)); ck_assert(pc_from_dummy(dn[0])->cluster_weight() == 0); ck_assert(pc_from_dummy(dn[1])->cluster_weight() == 0); ck_assert(pc_from_dummy(dn[2])->cluster_weight() == 2); std::for_each(dn.begin(), dn.end(), gu::DeleteObject()); } END_TEST // // The scenario is the following (before fix): // // - Two nodes 2 and 3 started with weights 1 // - Third node 1 with weight 3 is brought in the cluster // (becomes representative) // - Partitioning to (1) and (2, 3) happens so that INSTALL message is // delivered on 2 and 3 in TRANS and on 1 in REG // - Node 1 forms PC // - Nodes 2 and 3 renegotiate and form PC too because node 1 was not present // in the 
previous PC // // What should happen is that nodes 2 and 3 recompute quorum on handling // install message and shift to non-PC // START_TEST(test_weighted_partitioning_1) { log_info << "START (test_weighted_partitioning_1)"; gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); single_boot(0, &pu3); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu3, &pu2); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); // trans views { View tr1(0, ViewId(V_TRANS, uuid1, 0)); tr1.add_member(uuid1, 0); pu1.pc()->connect(false); ProtoUpMeta um1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), um1); View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); ProtoUpMeta um23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), um23); pu3.pc()->handle_up(0, Datagram(), um23); } // reg view { View reg(0, ViewId(V_REG, uuid1, pu2.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); reg.add_member(uuid3, 0); ProtoUpMeta um(UUID::nil(), ViewId(), ®); pu1.pc()->handle_up(0, Datagram(), um); pu2.pc()->handle_up(0, Datagram(), um); pu3.pc()->handle_up(0, Datagram(), um); } // states exch { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); 
delete dg; dg = pu2.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; ck_assert(pu2.tp()->out() == 0); ck_assert(pu3.tp()->out() == 0); } // install msg { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); ck_assert(pu1.pc()->state() == Proto::S_PRIM); // trans view for 2 and 3 View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); tr23.add_partitioned(uuid1, 0); ProtoUpMeta trum23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), trum23); pu3.pc()->handle_up(0, Datagram(), trum23); // 2 and 3 handle install pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; // reg view for 2 and 3 View reg23(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(uuid2, 0); reg23.add_member(uuid3, 0); ProtoUpMeta rum23(UUID::nil(), ViewId(), ®23); pu2.pc()->handle_up(0, Datagram(), rum23); pu3.pc()->handle_up(0, Datagram(), rum23); // states exch dg = pu2.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; // 2 and 3 should end up in non prim ck_assert_msg(pu2.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu2.pc()->state()).c_str()); ck_assert_msg(pu3.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu3.pc()->state()).c_str()); } } END_TEST // // - Two nodes 2 and 3 
started with weights 1 // - Third node 1 with weight 3 is brought in the cluster // (becomes representative) // - Partitioning to (1) and (2, 3) happens so that INSTALL message is // delivered in trans view on all nodes // - All nodes should end up in non-prim, nodes 2 and 3 because they don't know // if node 1 ended up in prim (see test_weighted_partitioning_1 above), // node 1 because it hasn't been in primary before and fails to deliver // install message in reg view // START_TEST(test_weighted_partitioning_2) { log_info << "START (test_weighted_partitioning_2)"; gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); single_boot(0, &pu3); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu3, &pu2); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); // trans views { View tr1(0, ViewId(V_TRANS, uuid1, 0)); tr1.add_member(uuid1, 0); pu1.pc()->connect(false); ProtoUpMeta um1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), um1); View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); ProtoUpMeta um23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), um23); pu3.pc()->handle_up(0, Datagram(), um23); } // reg view { View reg(0, ViewId(V_REG, uuid1, pu2.pc()->current_view().id().seq() + 1)); reg.add_member(uuid1, 0); reg.add_member(uuid2, 0); reg.add_member(uuid3, 0); ProtoUpMeta um(UUID::nil(), 
ViewId(), ®); pu1.pc()->handle_up(0, Datagram(), um); pu2.pc()->handle_up(0, Datagram(), um); pu3.pc()->handle_up(0, Datagram(), um); } // states exch { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); delete dg; dg = pu2.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; ck_assert(pu2.tp()->out() == 0); ck_assert(pu3.tp()->out() == 0); } // install msg { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); // trans view for 1 View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(uuid1, 0); tr1.add_partitioned(uuid2, 0); tr1.add_partitioned(uuid3, 0); ProtoUpMeta trum1(UUID::nil(), ViewId(), &tr1); pu1.pc()->handle_up(0, Datagram(), trum1); ck_assert(pu1.pc()->state() == Proto::S_TRANS); // 1 handle install pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); ck_assert(pu1.pc()->state() == Proto::S_TRANS); // trans view for 2 and 3 View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(uuid2, 0); tr23.add_member(uuid3, 0); tr23.add_partitioned(uuid1, 0); ProtoUpMeta trum23(UUID::nil(), ViewId(), &tr23); pu2.pc()->handle_up(0, Datagram(), trum23); pu3.pc()->handle_up(0, Datagram(), trum23); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu3.pc()->state() == Proto::S_TRANS); // 2 and 3 handle install pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu3.pc()->state() == Proto::S_TRANS); delete dg; // reg view for 1 View reg1(0, 
ViewId(V_REG, uuid1, pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(uuid1, 0); ProtoUpMeta rum1(UUID::nil(), ViewId(), ®1); pu1.pc()->handle_up(0, Datagram(), rum1); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); // reg view for 2 and 3 View reg23(0, ViewId(V_REG, uuid2, pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(uuid2, 0); reg23.add_member(uuid3, 0); ProtoUpMeta rum23(UUID::nil(), ViewId(), ®23); pu2.pc()->handle_up(0, Datagram(), rum23); pu3.pc()->handle_up(0, Datagram(), rum23); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3.pc()->state() == Proto::S_STATES_EXCH); // states exch dg = pu1.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(uuid1)); ck_assert_msg(pu1.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu1.pc()->state()).c_str()); delete dg; dg = pu2.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid2)); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(uuid3)); delete dg; ck_assert_msg(pu2.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu2.pc()->state()).c_str()); ck_assert_msg(pu3.pc()->state() == Proto::S_NON_PRIM, "state: %s", Proto::to_string(pu3.pc()->state()).c_str()); } } END_TEST // // - Nodes 1-3 started with equal weights // - Weight for node 1 is changed to 3 // - Group splits to (1), (2, 3) // - Weigh changing message is delivered in reg view in (1) and in // trans in (2, 3) // - Expected outcome: 1 stays in prim, 2 and 3 end up in non-prim // START_TEST(test_weight_change_partitioning_1) { log_info << "START (test_weight_change_partitioning_1)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "1"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser 
pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { Protolay::sync_param_cb_t sync_param_cb; pu1.pc()->set_param("pc.weight", "3", sync_param_cb); ck_assert(sync_param_cb.empty() == false); Datagram* install_dg(pu1.tp()->out()); ck_assert(install_dg != 0); // node 1 handle weight change install, proceed to singleton prim pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pu1.pc()->state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(pu1.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; ck_assert(pu1.pc()->state() == Proto::S_INSTALL); dg = pu1.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; ck_assert(pu1.pc()->state() == Proto::S_PRIM); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); 
tr23.add_partitioned(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu3.pc()->state() == Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3.pc()->state() == Proto::S_STATES_EXCH); dg = pu2.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; ck_assert(pu2.pc()->state() == Proto::S_NON_PRIM); ck_assert(pu3.pc()->state() == Proto::S_NON_PRIM); delete install_dg; } } END_TEST // // - Nodes 2 and 3 start with weight 1, node 1 with weight 3 // - Weight for node 1 is changed to 1 // - Group splits to (1), (2, 3) // - Weigh changing message is delivered in reg view in (1) and in // trans in (2, 3) // - Expected outcome: all nodes go non-prim // START_TEST(test_weight_change_partitioning_2) { log_info << "START (test_weight_change_partitioning_2)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); 
gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { Protolay::sync_param_cb_t sync_param_cb; pu1.pc()->set_param("pc.weight", "1", sync_param_cb); ck_assert(sync_param_cb.empty() == false); Datagram* install_dg(pu1.tp()->out()); ck_assert(install_dg != 0); // node 1 handle weight change install, proceed to singleton prim pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View tr1(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pu1.pc()->state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); reg1.add_member(pu1.uuid(), 0); pu1.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; ck_assert(pu1.pc()->state() == Proto::S_NON_PRIM); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); tr23.add_partitioned(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu3.pc()->state() == 
Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3.pc()->state() == Proto::S_STATES_EXCH); dg = pu2.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; ck_assert(pu2.pc()->state() == Proto::S_NON_PRIM); ck_assert(pu3.pc()->state() == Proto::S_NON_PRIM); delete install_dg; } } END_TEST // // Weight changing message is delivered in transitional view when new node is // joining. All nodes should end up in prim. 
// START_TEST(test_weight_change_joining) { log_info << "START (test_weight_change_joining)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "1"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "1"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); // weight change { Protolay::sync_param_cb_t sync_param_cb; pu1.pc()->set_param("pc.weight", "1", sync_param_cb); ck_assert(sync_param_cb.empty() == false); Datagram* install_dg(pu1.tp()->out()); ck_assert(install_dg != 0); // trans views { View tr12(0, ViewId(V_TRANS, pu1.pc()->current_view().id())); tr12.add_member(pu1.uuid(), 0); tr12.add_member(pu2.uuid(), 0); ProtoUpMeta trum12(UUID::nil(), ViewId(), &tr12); pu1.pc()->handle_up(0, Datagram(), trum12); pu2.pc()->handle_up(0, Datagram(), trum12); ck_assert(pu1.pc()->state() == Proto::S_TRANS); ck_assert(pu2.pc()->state() == Proto::S_TRANS); // deliver weight change install in trans view pu1.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->connect(false); View tr3(0, ViewId(V_TRANS, pu3.uuid(), 0)); tr3.add_member(pu3.uuid(), 0); ProtoUpMeta trum3(UUID::nil(), ViewId(), &tr3); pu3.pc()->handle_up(0, Datagram(), trum3); ck_assert(pu3.pc()->state() == Proto::S_TRANS); } // reg view { View reg(0, ViewId(V_REG, pu1.uuid(), pu1.pc()->current_view().id().seq() + 1)); 
reg.add_member(pu1.uuid(), 0); reg.add_member(pu2.uuid(), 0); reg.add_member(pu3.uuid(), 0); ProtoUpMeta pum(UUID::nil(), ViewId(), ®); pu1.pc()->handle_up(0, Datagram(), pum); pu2.pc()->handle_up(0, Datagram(), pum); pu3.pc()->handle_up(0, Datagram(), pum); ck_assert(pu1.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3.pc()->state() == Proto::S_STATES_EXCH); } // states exch { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; dg = pu2.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; ck_assert(pu1.pc()->state() == Proto::S_INSTALL); ck_assert(pu2.pc()->state() == Proto::S_INSTALL); ck_assert(pu3.pc()->state() == Proto::S_INSTALL); } // install { Datagram* dg(pu1.tp()->out()); ck_assert(dg != 0); pu1.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); delete dg; ck_assert(pu1.pc()->state() == Proto::S_PRIM); ck_assert(pu2.pc()->state() == Proto::S_PRIM); ck_assert(pu3.pc()->state() == Proto::S_PRIM); } delete install_dg; } } END_TEST // // One of the nodes leaves gracefully from group and weight change message // is delivered in trans view. Remaining nodes must not enter non-prim. 
// START_TEST(test_weight_change_leaving) { log_info << "START (test_weight_change_leaving)"; gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); conf1.set("pc.weight", "3"); UUID uuid1(1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); conf2.set("pc.weight", "2"); UUID uuid2(2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); conf3.set("pc.weight", "1"); UUID uuid3(3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); // weight change { Protolay::sync_param_cb_t sync_param_cb; // change weight for node 2 while node 1 leaves the group gracefully pu2.pc()->set_param("pc.weight", "1", sync_param_cb); ck_assert(sync_param_cb.empty() == false); Datagram* install_dg(pu2.tp()->out()); ck_assert(install_dg != 0); // nodes 2 and 3 go to trans, handle install View tr23(0, ViewId(V_TRANS, pu2.pc()->current_view().id())); tr23.add_member(pu2.uuid(), 0); tr23.add_member(pu3.uuid(), 0); tr23.add_left(pu1.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr23)); ck_assert(pu2.pc()->state() == Proto::S_TRANS); ck_assert(pu3.pc()->state() == Proto::S_TRANS); pu2.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); pu3.pc()->handle_up(0, *install_dg, ProtoUpMeta(pu1.uuid())); View reg23(0, ViewId(V_REG, pu2.uuid(), pu2.pc()->current_view().id().seq() + 1)); reg23.add_member(pu2.uuid(), 0); reg23.add_member(pu3.uuid(), 0); pu2.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), 
®23)); pu3.pc()->handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®23)); ck_assert(pu2.pc()->state() == Proto::S_STATES_EXCH); ck_assert(pu3.pc()->state() == Proto::S_STATES_EXCH); Datagram* dg(pu2.tp()->out()); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); delete dg; dg = pu3.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; ck_assert(pu2.pc()->state() == Proto::S_INSTALL); ck_assert(pu3.pc()->state() == Proto::S_INSTALL); dg = pu2.tp()->out(); ck_assert(dg != 0); pu2.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); pu3.pc()->handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); delete dg; ck_assert(pu2.pc()->state() == Proto::S_PRIM); ck_assert(pu3.pc()->state() == Proto::S_PRIM); delete install_dg; } } END_TEST // node1 and node2 are a cluster. // before node3 joins, node2 lost connection to node1 and node3. // after node1 and node3 merged, node2 joins. // we expect all nodes are a cluster, and they are all in prim state. static void _test_join_split_cluster( const UUID& uuid1, const UUID& uuid2, const UUID& uuid3) { // construct restored view. const UUID& prim_uuid = uuid1 < uuid2 ? 
uuid1 : uuid2; View rst_view(0, ViewId(V_PRIM, prim_uuid, 0)); rst_view.add_member(uuid1, 0); rst_view.add_member(uuid2, 0); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); pc1.set_restored_view(&rst_view); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); pc2.set_restored_view(&rst_view); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); // assume previous cluster is node1 and node3. const UUID& prim_uuid2 = uuid1 < uuid3 ? uuid1 : uuid3; View rst_view2(0, ViewId(V_PRIM, prim_uuid2, 0)); rst_view2.add_member(uuid1, 0); rst_view2.add_member(uuid3, 0); pc3.set_restored_view(&rst_view2); { uint32_t seq = pc1.current_view().id().seq(); const UUID& reg_uuid = pu1.uuid() < pu3.uuid() ? 
pu1.uuid() : pu3.uuid(); // node1 View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, reg_uuid, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); reg1.add_partitioned(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); // node3 View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.connect(false); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, reg_uuid, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); ck_assert(dg1 != 0); Datagram* dg3(pu3.tp()->out()); ck_assert(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc1.state() == Proto::S_NON_PRIM); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc3.state() == Proto::S_NON_PRIM); delete dg1; delete dg3; } { // node2 uint32_t seq = pc2.current_view().id().seq(); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); tr2.add_partitioned(pu1.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); ck_assert(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, pc2.uuid(), seq + 1)); reg2.add_member(pu2.uuid(), 0); reg2.add_partitioned(pu1.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); ck_assert(pc2.state() == 
Proto::S_STATES_EXCH); Datagram* dg2(pu2.tp()->out()); ck_assert(dg2 != 0); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); ck_assert(pc2.state() == Proto::S_NON_PRIM); delete dg2; } { View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); ck_assert(pc2.state() == Proto::S_TRANS); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu1.uuid(), 0); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); int seq = pc1.current_view().id().seq(); const UUID& reg_uuid1 = pu1.uuid() < pu2.uuid() ? pu1.uuid() : pu2.uuid(); const UUID& reg_uuid = reg_uuid1 < pu3.uuid() ? 
reg_uuid1 : pu3.uuid(); View reg1(0, ViewId(V_REG, reg_uuid, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); View reg2(0, ViewId(V_REG, reg_uuid, seq + 1)); reg2.add_member(pu1.uuid(), 0); reg2.add_member(pu2.uuid(), 0); reg2.add_member(pu3.uuid(), 0); reg2.add_joined(pu1.uuid(), 0); reg2.add_joined(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); ck_assert(pc2.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc1.state() == Proto::S_INSTALL); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc2.state() == Proto::S_INSTALL); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc3.state() == Proto::S_INSTALL); delete dg1; delete dg2; delete dg3; Datagram* dg = 0; PCUser* pcs[3] = {&pu1, &pu2, &pu3}; for (int i=0; i<3; i++) { if (pcs[i]->uuid() == reg_uuid) { dg = pcs[i]->tp()->out(); ck_assert(dg != 0); } else { ck_assert(!pcs[i]->tp()->out()); } } pc1.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); pc2.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); pc3.handle_up(0, *dg, ProtoUpMeta(reg_uuid)); ck_assert(pc1.state() == Proto::S_PRIM); ck_assert(pc2.state() == Proto::S_PRIM); ck_assert(pc3.state() == Proto::S_PRIM); delete dg; } } START_TEST(test_join_split_cluster) { log_info << 
"START (test_join_split_cluster)"; gu_conf_debug_on(); UUID uuid1(1); UUID uuid2(2); UUID uuid3(3); _test_join_split_cluster(uuid1, uuid2, uuid3); _test_join_split_cluster(uuid2, uuid1, uuid3); _test_join_split_cluster(uuid2, uuid3, uuid1); } END_TEST START_TEST(test_trac_762) { log_info << "START (trac_762)"; size_t n_nodes(3); vector dn; PropagationMatrix prop; const string suspect_timeout("PT0.35S"); const string inactive_timeout("PT0.7S"); const string retrans_period("PT0.1S"); uint32_t view_seq = 0; for (size_t i = 0; i < n_nodes; ++i) { dn.push_back(create_dummy_node(i + 1, 0, suspect_timeout, inactive_timeout, retrans_period)); gu_trace(join_node(&prop, dn[i], i == 0)); set_cvi(dn, 0, i, ++view_seq, V_PRIM); gu_trace(prop.propagate_until_cvi(false)); } log_info << "split 1"; // split group so that node 3 becomes isolated prop.split(1, 3); prop.split(2, 3); ++view_seq; set_cvi(dn, 0, 1, view_seq, V_PRIM); set_cvi(dn, 2, 2, view_seq, V_NON_PRIM); gu_trace(prop.propagate_until_cvi(true)); mark_point(); log_info << "remerge 1"; // detach PC layer from EVS and lower layers, attach to DummyTransport for (size_t i(0); i < n_nodes; ++i) { std::list::iterator li0(dn[i]->protos().begin()); std::list::iterator li1(li0); ++li1; assert(li1 != dn[i]->protos().end()); std::list::iterator li2(li1); ++li2; assert(li2 != dn[i]->protos().end()); gcomm::disconnect(*li0, *li1); gcomm::disconnect(*li1, *li2); delete *li0; delete *li1; dn[i]->protos().pop_front(); dn[i]->protos().pop_front(); DummyTransport* tp(new DummyTransport(dn[i]->uuid(), true)); dn[i]->protos().push_front(tp); gcomm::connect(tp, *li2); } Proto* pc1(pc_from_dummy(dn[0])); DummyTransport* tp1(reinterpret_cast( dn[0]->protos().front())); Proto* pc2(pc_from_dummy(dn[1])); DummyTransport* tp2(reinterpret_cast( dn[1]->protos().front())); Proto* pc3(pc_from_dummy(dn[2])); DummyTransport* tp3(reinterpret_cast( dn[2]->protos().front())); // remerge group, process event by event so that nodes 1 and 2 handle // 
install message in reg view and reach prim view, node 3 partitions and // handles install in trans view and marks nodes 1 and 2 to have un state { View tr1(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr1.add_member(tp1->uuid(), 0); tr1.add_member(tp2->uuid(), 0); pc1->handle_view(tr1); pc2->handle_view(tr1); View tr2(0, ViewId(V_TRANS, tp3->uuid(), view_seq)); tr2.add_member(tp3->uuid(), 0); pc3->handle_view(tr2); ++view_seq; View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); reg.add_member(tp3->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); pc3->handle_view(reg); // states exch Datagram* dg(tp1->out()); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); delete dg; dg = tp3->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; // install message dg = tp1->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); View tr3(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr3.add_member(tp1->uuid(), 0); tr3.add_member(tp2->uuid(), 0); tr3.add_partitioned(tp3->uuid(), 0); pc1->handle_view(tr3); pc2->handle_view(tr3); View tr4(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr4.add_member(tp3->uuid(), 0); tr4.add_partitioned(tp1->uuid(), 0); tr4.add_partitioned(tp2->uuid(), 0); pc3->handle_view(tr4); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; } ++view_seq; // ... 
intermediate reg/trans views // 1 and 2 { View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); View tr(0, ViewId(V_TRANS, tp1->uuid(), view_seq)); tr.add_member(tp1->uuid(), 0); tr.add_member(tp2->uuid(), 0); pc1->handle_view(tr); pc2->handle_view(tr); Datagram* dg(tp1->out()); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; } // 3 { View reg(0, ViewId(V_REG, tp3->uuid(), view_seq)); reg.add_member(tp3->uuid(), 0); pc3->handle_view(reg); Datagram* dg(tp3->out()); ck_assert(dg != 0); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; View tr(0, ViewId(V_TRANS, tp3->uuid(), view_seq)); tr.add_member(tp3->uuid(), 0); pc3->handle_view(tr); } // Remerge and PC crash should occur if bug is present. 
++view_seq; { View reg(0, ViewId(V_REG, tp1->uuid(), view_seq)); reg.add_member(tp1->uuid(), 0); reg.add_member(tp2->uuid(), 0); reg.add_member(tp3->uuid(), 0); pc1->handle_view(reg); pc2->handle_view(reg); pc3->handle_view(reg); // State msgs Datagram* dg(tp1->out()); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); delete dg; dg = tp2->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp2->uuid())); delete dg; dg = tp3->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp3->uuid())); delete dg; // Install msg dg = tp1->out(); ck_assert(dg != 0); pc1->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc2->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); pc3->handle_up(0, *dg, ProtoUpMeta(tp1->uuid())); ck_assert(tp1->out() == 0); ck_assert(tp2->out() == 0); ck_assert(tp3->out() == 0); delete dg; } std::for_each(dn.begin(), dn.end(), gu::DeleteObject()); } END_TEST START_TEST(test_gh_92) { UUID uuid1(1), uuid2(2), uuid3(3); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(0, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(0, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(0, &pu1, &pu2, &pu3); uint32_t seq = pc1.current_view().id().seq(); 
Datagram* im = 0; Datagram* dg = 0; // they split into three parts. { View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_partitioned(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); dg = pu1.tp()->out(); pc1.handle_up(0, *dg, ProtoUpMeta(pu1.uuid())); ck_assert(pc1.state() == Proto::S_NON_PRIM); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); tr2.add_partitioned(pu1.uuid(), 0); tr2.add_partitioned(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); ck_assert(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, uuid2, seq + 1)); reg2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); ck_assert(pc2.state() == Proto::S_STATES_EXCH); delete dg; dg = pu2.tp()->out(); pc2.handle_up(0, *dg, ProtoUpMeta(pu2.uuid())); ck_assert(pc2.state() == Proto::S_NON_PRIM); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); tr3.add_partitioned(pu1.uuid(), 0); tr3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid3, seq + 1)); reg3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); delete dg; dg = pu3.tp()->out(); pc3.handle_up(0, *dg, ProtoUpMeta(pu3.uuid())); ck_assert(pc3.state() == Proto::S_NON_PRIM); delete dg; dg = 0; } seq += 1; // they try to merge into a primary component, but fails when sending install message. 
{ View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu2.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); View tr2(0, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pu2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); ck_assert(pc2.state() == Proto::S_TRANS); View reg2(0, ViewId(V_REG, uuid1, seq + 1)); reg2.add_member(pu1.uuid(), 0); reg2.add_member(pu2.uuid(), 0); reg2.add_member(pu3.uuid(), 0); reg2.add_joined(pu1.uuid(), 0); reg2.add_joined(pu3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); ck_assert(pc2.state() == Proto::S_STATES_EXCH); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu2.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); reg3.add_joined(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); ck_assert(dg1 != 0); ck_assert(dg2 != 0); ck_assert(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, 
ProtoUpMeta(pu3.uuid())); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); delete dg1; delete dg2; delete dg3; ck_assert(pc1.state() == Proto::S_INSTALL); ck_assert(pc2.state() == Proto::S_INSTALL); ck_assert(pc3.state() == Proto::S_INSTALL); im = pu1.tp()->out(); ck_assert(im != 0); ck_assert(pu2.tp()->out() == 0); ck_assert(pu3.tp()->out() == 0); } seq += 1; // node3 is separate from node1 and node2. // they get the stale install message when they get transient view. { View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu2.uuid(), 0); tr1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); ck_assert(pc2.state() == Proto::S_TRANS); pc1.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); ck_assert(pc1.state() == Proto::S_TRANS); ck_assert(pc2.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_partitioned(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); ck_assert(pc2.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); ck_assert(dg1 != 0); ck_assert(dg2 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); ck_assert(pc1.state() == Proto::S_NON_PRIM); ck_assert(pc2.state() == Proto::S_NON_PRIM); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); 
tr3.add_partitioned(pu1.uuid(), 0); tr3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); pc3.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); ck_assert(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid3, seq + 1)); reg3.add_member(pu3.uuid(), 0); reg3.add_partitioned(pu1.uuid(), 0); reg3.add_partitioned(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®3)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg3(pu3.tp()->out()); ck_assert(dg3 != 0); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); ck_assert(pc3.state() == Proto::S_NON_PRIM); delete dg1; delete dg2; delete dg3; } seq += 1; // then they try to merge into a primary component again. { View tr1(0, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pu1.uuid(), 0); tr1.add_member(pu2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); ck_assert(pc1.state() == Proto::S_TRANS); ck_assert(pc2.state() == Proto::S_TRANS); View reg1(0, ViewId(V_REG, uuid1, seq + 1)); reg1.add_member(pu1.uuid(), 0); reg1.add_member(pu2.uuid(), 0); reg1.add_member(pu3.uuid(), 0); reg1.add_joined(pu3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); ck_assert(pc1.state() == Proto::S_STATES_EXCH); ck_assert(pc2.state() == Proto::S_STATES_EXCH); View tr3(0, ViewId(V_TRANS, pc3.current_view().id())); tr3.add_member(pu3.uuid(), 0); pc3.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr3)); ck_assert(pc3.state() == Proto::S_TRANS); View reg3(0, ViewId(V_REG, uuid1, seq + 1)); reg3.add_member(pu1.uuid(), 0); reg3.add_member(pu2.uuid(), 0); reg3.add_member(pu3.uuid(), 0); reg3.add_joined(pu1.uuid(), 0); reg3.add_joined(pu2.uuid(), 0); pc3.handle_up(0, Datagram(), 
ProtoUpMeta(UUID::nil(), ViewId(), ®3)); ck_assert(pc3.state() == Proto::S_STATES_EXCH); Datagram* dg1(pu1.tp()->out()); Datagram* dg2(pu2.tp()->out()); Datagram* dg3(pu3.tp()->out()); ck_assert(dg1 != 0); ck_assert(dg2 != 0); ck_assert(dg3 != 0); pc1.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc1.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc1.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc2.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc2.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); pc3.handle_up(0, *dg1, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *dg2, ProtoUpMeta(pu2.uuid())); pc3.handle_up(0, *dg3, ProtoUpMeta(pu3.uuid())); delete dg1; delete dg2; delete dg3; ck_assert(pc1.state() == Proto::S_INSTALL); ck_assert(pc2.state() == Proto::S_INSTALL); ck_assert(pc3.state() == Proto::S_INSTALL); delete im; im = pu1.tp()->out(); ck_assert(im != 0); ck_assert(pu2.tp()->out() == 0); ck_assert(pu3.tp()->out() == 0); pc1.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc2.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); pc3.handle_up(0, *im, ProtoUpMeta(pu1.uuid())); ck_assert(pc1.state() == Proto::S_PRIM); ck_assert(pc2.state() == Proto::S_PRIM); ck_assert(pc3.state() == Proto::S_PRIM); delete im; } } END_TEST // Nodes 1, 2, 3. Node 3 will be evicted from group while group is // fully partitioned. After remerging 1 and 2 they should reach // primary component. 
START_TEST(test_prim_after_evict) { log_info << "START(test_prim_after_evict)"; UUID uuid1(1), uuid2(2), uuid3(3); gu::Config conf1; gu::ssl_register_params(conf1); gcomm::Conf::register_params(conf1); ProtoUpMeta pum1(uuid1); Proto pc1(conf1, uuid1, 0); DummyTransport tp1; PCUser pu1(conf1, uuid1, &tp1, &pc1); single_boot(1, &pu1); gu::Config conf2; gu::ssl_register_params(conf2); gcomm::Conf::register_params(conf2); ProtoUpMeta pum2(uuid2); Proto pc2(conf2, uuid2, 0); DummyTransport tp2; PCUser pu2(conf2, uuid2, &tp2, &pc2); double_boot(1, &pu1, &pu2); gu::Config conf3; gu::ssl_register_params(conf3); gcomm::Conf::register_params(conf3); ProtoUpMeta pum3(uuid3); Proto pc3(conf3, uuid3, 0); DummyTransport tp3; PCUser pu3(conf3, uuid3, &tp3, &pc3); triple_boot(1, &pu1, &pu2, &pu3); // Node 1 partitions { // Trans view View tr1(1, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pc1.uuid(), 0); tr1.add_partitioned(pc2.uuid(), 0); tr1.add_partitioned(pc3.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); // Reg view View reg1(1, ViewId(V_REG, pc1.uuid(), tr1.id().seq() + 1)); reg1.add_member(pc1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®1)); // States exch Datagram* dg(tp1.out()); ck_assert(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); delete dg; // Non-prim dg = tp1.out(); ck_assert(dg == 0); ck_assert(pc1.state() == Proto::S_NON_PRIM); } // Node 2 partitions { // Trans view View tr2(1, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pc2.uuid(), 0); tr2.add_partitioned(pc1.uuid(), 0); tr2.add_partitioned(pc3.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); // Reg view View reg2(1, ViewId(V_REG, pc2.uuid(), tr2.id().seq() + 1)); reg2.add_member(pc2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®2)); // States exch Datagram* dg(tp2.out()); ck_assert(dg != 0); pc2.handle_up(0, *dg, ProtoUpMeta(pc2.uuid())); 
delete dg; // Non-prim dg = tp2.out(); ck_assert(dg == 0); ck_assert(pc2.state() == Proto::S_NON_PRIM); } // Just forget about node3, it is gone forever // Nodes 1 and 2 set node3 evicted pc1.evict(pc3.uuid()); pc2.evict(pc3.uuid()); // Nodes 1 and 2 merge and should reach Prim { // Trans view for node 1 View tr1(1, ViewId(V_TRANS, pc1.current_view().id())); tr1.add_member(pc1.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr1)); Datagram *dg(tp1.out()); ck_assert(dg == 0); ck_assert(pc1.state() == Proto::S_TRANS); // Trans view for node 2 View tr2(1, ViewId(V_TRANS, pc2.current_view().id())); tr2.add_member(pc2.uuid(), 0); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), &tr2)); dg = tp2.out(); ck_assert(dg == 0); ck_assert(pc2.state() == Proto::S_TRANS); // Reg view for nodes 1 and 2 View reg(1, ViewId(V_REG, pc1.uuid(), tr1.id().seq() + 1)); reg.add_member(pc1.uuid(), 0); reg.add_member(pc2.uuid(), 0); pc1.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®)); pc2.handle_up(0, Datagram(), ProtoUpMeta(UUID::nil(), ViewId(), ®)); // States exchange ck_assert(pc1.state() == Proto::S_STATES_EXCH); ck_assert(pc2.state() == Proto::S_STATES_EXCH); // State message from node 1 dg = tp1.out(); ck_assert(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); pc2.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); delete dg; dg = tp1.out(); ck_assert(dg == 0); // State message from node 2 dg = tp2.out(); ck_assert(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc2.uuid())); pc2.handle_up(0, *dg, ProtoUpMeta(pc2.uuid())); delete dg; dg = tp2.out(); ck_assert(dg == 0); // Install ck_assert_msg(pc1.state() == Proto::S_INSTALL, "state is %s", Proto::to_string(pc1.state()).c_str()); ck_assert_msg(pc2.state() == Proto::S_INSTALL, "state is %s", Proto::to_string(pc2.state()).c_str()); // Install message from node 1 dg = tp1.out(); ck_assert(dg != 0); pc1.handle_up(0, *dg, ProtoUpMeta(pc1.uuid())); pc2.handle_up(0, *dg, 
ProtoUpMeta(pc1.uuid())); delete dg; // Prim dg = tp1.out(); ck_assert(dg == 0); dg = tp2.out(); ck_assert(dg == 0); ck_assert(pc1.state() == Proto::S_PRIM); ck_assert(pc2.state() == Proto::S_PRIM); } } END_TEST class DummyEvs : public gcomm::Bottomlay { public: DummyEvs(gu::Config& conf) : gcomm::Bottomlay(conf) { } int handle_down(Datagram&, const ProtoDownMeta&) { return 0; } }; class DummyTop : public gcomm::Toplay { public: DummyTop(gu::Config& conf) : gcomm::Toplay(conf) { } void handle_up(const void*, const gcomm::Datagram&, const gcomm::ProtoUpMeta&) { } }; // Test outline: // * Three node cluster, nodes n1, n2, n3 // * Current primary view is (n1, n2), view number 2 // * Group is merging, current EVS view is (n1, n2, n3), // view number 3 // * State messages have been delivered, but group partitioned again when // install message was being sent. // * Underlying EVS membership changes so that the transitional view // ends up in (n1, n3), paritioned (n2) // * It is expected that the n1 ends up in non-primary component. START_TEST(test_quorum_2_to_2_in_3_node_cluster) { gu_log_max_level = GU_LOG_DEBUG; gcomm::pc::ProtoBuilder builder; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); // Current view is EVS view (n1, n2, n3), view number 3 gcomm::View current_view(0, gcomm::ViewId(V_REG, gcomm::UUID(1), 3)); current_view.add_member(gcomm::UUID(1), 0); current_view.add_member(gcomm::UUID(2), 0); current_view.add_member(gcomm::UUID(3), 0); // Primary component view (n1, n2), view number 2 gcomm::View pc_view(0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 2)); pc_view.add_member(gcomm::UUID(1), 0); pc_view.add_member(gcomm::UUID(2), 0); // Known instances according to state messages. 
gcomm::pc::Node node1(true, false, false, 0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 2), 0, 1, 0); gcomm::pc::Node node2(true, false, false, 0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 2), 0, 1, 0); gcomm::pc::Node node3(false, false, false, 0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 1), 0, 1, 0); gcomm::pc::NodeMap instances; instances.insert(std::make_pair(gcomm::UUID(1), node1)); instances.insert(std::make_pair(gcomm::UUID(2), node2)); instances.insert(std::make_pair(gcomm::UUID(3), node3)); // State messages for all nodes. // * Nodes n1, n2 report previous prim view (n1, n2), view number 2. // * Node 3 reports previous prim view (n1, n2, n3), view number 1. gcomm::pc::Proto::SMMap state_msgs; { // Node n1 gcomm::pc::NodeMap nm; nm.insert(std::make_pair(gcomm::UUID(1), node1)); nm.insert(std::make_pair(gcomm::UUID(2), node2)); gcomm::pc::Message msg(1, gcomm::pc::Message::PC_T_STATE, 0, nm); state_msgs.insert(std::make_pair(gcomm::UUID(1), msg)); } { // Node n2 gcomm::pc::NodeMap nm; nm.insert(std::make_pair(gcomm::UUID(1), node1)); nm.insert(std::make_pair(gcomm::UUID(2), node2)); gcomm::pc::Message msg(1, gcomm::pc::Message::PC_T_STATE, 0, nm); state_msgs.insert(std::make_pair(gcomm::UUID(2), msg)); } { // Node3 gcomm::pc::NodeMap nm; // Nodes n1 and n2 have previously been seen in prim view number 1 nm.insert(std::make_pair(gcomm::UUID(1), gcomm::pc::Node( false, false, false, 0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 1), 0, 1, 0))); nm.insert(std::make_pair(gcomm::UUID(2), gcomm::pc::Node( false, false, false, 0, gcomm::ViewId(V_PRIM, gcomm::UUID(1), 1), 0, 1, 0))); nm.insert(std::make_pair(gcomm::UUID(3), node3)); gcomm::pc::Message msg(1, gcomm::pc::Message::PC_T_STATE, 0, nm); state_msgs.insert(std::make_pair(gcomm::UUID(3), msg)); } // Build n1 state in S_INSTALL. 
builder .conf(conf) .uuid(gcomm::UUID(1)) .state_msgs(state_msgs) .current_view(current_view) .pc_view(pc_view) .instances(instances) .state(gcomm::pc::Proto::S_INSTALL); std::unique_ptr p(builder.make_proto()); DummyEvs devs(conf); DummyTop dtop(conf); gcomm::connect(&devs, p.get()); gcomm::connect(p.get(), &dtop); // Deliver transitional EVS view where members are n1, n3 and // partitioned n2. After handling transitional view n1 is // expected to end up in non-primary. gcomm::View trans_view(0, gcomm::ViewId(V_TRANS, gcomm::UUID(1), 3)); trans_view.add_member(gcomm::UUID(1), 0); trans_view.add_partitioned(gcomm::UUID(2), 0); trans_view.add_member(gcomm::UUID(3), 0); p->handle_view(trans_view); ck_assert(p->state() == gcomm::pc::Proto::S_TRANS); ck_assert(not p->prim()); } END_TEST Suite* pc_suite() { Suite* s = suite_create("gcomm::pc"); TCase* tc; tc = tcase_create("test_pc_messages"); tcase_add_test(tc, test_pc_messages); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_single"); tcase_add_test(tc, test_pc_view_changes_single); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_double"); tcase_add_test(tc, test_pc_view_changes_double); suite_add_tcase(s, tc); tc = tcase_create("test_pc_view_changes_reverse"); tcase_add_test(tc, test_pc_view_changes_reverse); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state1"); tcase_add_test(tc, test_pc_state1); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state2"); tcase_add_test(tc, test_pc_state2); suite_add_tcase(s, tc); tc = tcase_create("test_pc_state3"); tcase_add_test(tc, test_pc_state3); suite_add_tcase(s, tc); tc = tcase_create("test_pc_conflicting_prims"); tcase_add_test(tc, test_pc_conflicting_prims); suite_add_tcase(s, tc); tc = tcase_create("test_pc_conflicting_prims_npvo"); tcase_add_test(tc, test_pc_conflicting_prims_npvo); suite_add_tcase(s, tc); tc = tcase_create("test_pc_split_merge"); tcase_add_test(tc, test_pc_split_merge); tcase_set_timeout(tc, 15); 
suite_add_tcase(s, tc); tc = tcase_create("test_pc_split_merge_w_user_msg"); tcase_add_test(tc, test_pc_split_merge_w_user_msg); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_pc_complete_split_merge"); tcase_add_test(tc, test_pc_complete_split_merge); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_pc_protocol_upgrade"); tcase_add_test(tc, test_pc_protocol_upgrade); tcase_set_timeout(tc, 25); suite_add_tcase(s, tc); tc = tcase_create("test_trac_191"); tcase_add_test(tc, test_trac_191); suite_add_tcase(s, tc); tc = tcase_create("test_trac_413"); tcase_add_test(tc, test_trac_413); suite_add_tcase(s, tc); tc = tcase_create("test_fifo_violation"); tcase_add_test(tc, test_fifo_violation); suite_add_tcase(s, tc); tc = tcase_create("test_checksum"); tcase_add_test(tc, test_checksum); suite_add_tcase(s, tc); tc = tcase_create("test_trac_277"); tcase_add_test(tc, test_trac_277); suite_add_tcase(s, tc); tc = tcase_create("test_trac_622_638"); tcase_add_test(tc, test_trac_622_638); suite_add_tcase(s, tc); tc = tcase_create("test_weighted_quorum"); tcase_add_test(tc, test_weighted_quorum); suite_add_tcase(s, tc); tc = tcase_create("test_weighted_partitioning_1"); tcase_add_test(tc, test_weighted_partitioning_1); suite_add_tcase(s, tc); tc = tcase_create("test_weighted_partitioning_2"); tcase_add_test(tc, test_weighted_partitioning_2); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_partitioning_1"); tcase_add_test(tc, test_weight_change_partitioning_1); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_partitioning_2"); tcase_add_test(tc, test_weight_change_partitioning_2); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_joining"); tcase_add_test(tc, test_weight_change_joining); suite_add_tcase(s, tc); tc = tcase_create("test_weight_change_leaving"); tcase_add_test(tc, test_weight_change_leaving); suite_add_tcase(s, tc); tc = tcase_create("test_trac_762"); tcase_add_test(tc, 
test_trac_762); tcase_set_timeout(tc, 15); suite_add_tcase(s, tc); tc = tcase_create("test_join_split_cluster"); tcase_add_test(tc, test_join_split_cluster); suite_add_tcase(s, tc); tc = tcase_create("test_gh_92"); tcase_add_test(tc, test_gh_92); suite_add_tcase(s, tc); tc = tcase_create("test_prim_after_evict"); tcase_add_test(tc, test_prim_after_evict); suite_add_tcase(s, tc); tc = tcase_create("test_quorum_2_to_2_in_3_node_cluster"); tcase_add_test(tc, test_quorum_2_to_2_in_3_node_cluster); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/ssl_test.cpp000644 000164 177776 00000011303 15107057155 021137 0ustar00jenkinsnogroup000000 000000 /* Copyrignt (C) 2014 Codership Oy */ #include "gcomm/protonet.hpp" #include "gcomm/util.hpp" #include "gcomm/conf.hpp" #include #include #include // std::cerr static gu::Config conf; class Client : public gcomm::Toplay { public: Client(gcomm::Protonet& pnet, const std::string& uri) : gcomm::Toplay(conf), uri_ (uri), pnet_ (pnet), pstack_(), socket_(pnet_.socket(uri)), msg_ () { pstack_.push_proto(this); pnet_.insert(&pstack_); } ~Client() { pnet_.erase(&pstack_); pstack_.pop_proto(this); socket_->close(); } void connect(bool f = false) { socket_->connect(uri_); } std::string msg() const { return std::string(msg_.begin(), msg_.end()); } void handle_up(const void* id, const gcomm::Datagram& dg, const gcomm::ProtoUpMeta& um) { if (um.err_no() != 0) { log_error << "socket failed: " << um.err_no(); socket_->close(); throw std::exception(); } else { assert(id == socket_->id()); msg_.insert(msg_.begin(), gcomm::begin(dg), gcomm::begin(dg) + gcomm::available(dg)); } } private: gu::URI uri_; gcomm::Protonet& pnet_; gcomm::Protostack pstack_; gcomm::SocketPtr socket_; gu::Buffer msg_; }; class Server : public gcomm::Toplay { public: Server(gcomm::Protonet& pnet, const std::string& uri) : gcomm::Toplay(conf), uri_(uri), pnet_(pnet), pstack_(), listener_(), smap_(), msg_("hello ssl") { pstack_.push_proto(this); 
pnet_.insert(&pstack_); listener_ = pnet_.acceptor(uri_); } ~Server() { pnet_.erase(&pstack_); pstack_.pop_proto(this); } void listen() { listener_->listen(uri_); } void handle_up(const void* id, const gcomm::Datagram& dg, const gcomm::ProtoUpMeta& um) { if (id == listener_->id()) { gcomm::SocketPtr socket(listener_->accept()); if (smap_.insert( std::make_pair(socket->id(), socket)).second == false) { throw std::logic_error("duplicate socket entry"); } return; } std::map::iterator si(smap_.find(id)); if (si == smap_.end()) { throw std::logic_error("could not find socket from map"); } gcomm::SocketPtr socket(si->second); if (socket->state() == gcomm::Socket::S_CONNECTED) { gcomm::Datagram msg; msg.payload().resize(msg_.size()); std::copy(msg_.begin(), msg_.end(), msg.payload().begin()); socket->send(0, msg); } else if (socket->state() == gcomm::Socket::S_CLOSED || socket->state() == gcomm::Socket::S_FAILED) { std::cerr << "socket " << id << " failed" << std::endl; socket->close(); smap_.erase(id); } else { std::cerr << "socket state: " << socket->state() << std::endl; } } private: Server(const Server&); void operator=(const Server&); gu::URI uri_; gcomm::Protonet& pnet_; gcomm::Protostack pstack_; std::shared_ptr listener_; std::map smap_; const std::string msg_; }; int main(int argc, char* argv[]) { if (argc != 4) { std::cerr << "usage: " << argv[0] << " <-s|-c> " << std::endl; return 1; } gu::Config conf; gcomm::Conf::register_params(conf); conf.parse(argv[2]); std::unique_ptr pnet(gcomm::Protonet::create(conf)); if (std::string("-s") == argv[1]) { Server server(*pnet, argv[3]); server.listen(); while (true) { pnet->event_loop(gu::datetime::Period(1 * gu::datetime::Sec)); } } else if (std::string("-c") == argv[1]) { Client client(*pnet, argv[3]); client.connect(); while (true) { pnet->event_loop(gu::datetime::Period(1*gu::datetime::MSec)); std::string msg(client.msg()); if (msg != "") { std::cout << "read message from server: '" << msg << "'" << std::endl; break; 
} } } return 0; } galera-4-26.4.25/gcomm/test/check_gcomm.cpp000644 000164 177776 00000004470 15107057155 021545 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2017 Codership Oy */ #include "check_gcomm.hpp" #include "gu_string_utils.hpp" // strsplit() #include "gu_exception.hpp" #include "gu_logger.hpp" #include #include #include #include #include #include #include // * suits = 0; FILE* log_file(0); if (argc > 1 && !strcmp(argv[1],"nofork")) { srunner_set_fork_status(sr, CK_NOFORK); } else if (argc > 1 && strcmp(argv[1], "nolog") == 0) { /* no log redirection */} else { // running in the background, loggin' to file log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); // redirect occasional stderr there as well if (dup2(fileno(log_file), 2) < 0) { perror("dup2() failed: "); return EXIT_FAILURE; } } if (::getenv("CHECK_GCOMM_DEBUG")) { gu_log_max_level = GU_LOG_DEBUG; //gu::Logger::enable_debug(true); } log_info << "check_gcomm, start tests"; if (::getenv("CHECK_GCOMM_SUITES")) { suits = new vector(gu::strsplit(::getenv("CHECK_GCOMM_SUITES"), ',')); } for (size_t i = 0; suites[i].suite != 0; ++i) { if (suits == 0 || find(suits->begin(), suits->end(), suites[i].name) != suits->end()) { srunner_add_suite(sr, suites[i].suite()); } } delete suits; suits = 0; srunner_run_all(sr, CK_NORMAL); log_info << "check_gcomm, run all tests"; int n_fail = srunner_ntests_failed(sr); srunner_free(sr); if (log_file) fclose(log_file); if (0 == n_fail && 0 != log_file) ::unlink(LOG_FILE); return n_fail == 0 ? 
EXIT_SUCCESS : EXIT_FAILURE; } galera-4-26.4.25/gcomm/test/SConscript000644 000164 177776 00000003336 15107057155 020614 0ustar00jenkinsnogroup000000 000000 Import('check_env') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' #/common #/galerautils/src #/gcomm/src ''')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) gcomm_check = env.Program(target = 'check_gcomm', source = Split(''' check_fair_send_queue.cpp check_gcomm.cpp check_gmcast.cpp check_trace.cpp check_types.cpp check_util.cpp check_evs2.cpp check_pc.cpp ''')) env.Test("gcomm_check.passed", gcomm_check) Clean(gcomm_check, '#/check_gcomm.log') # Non deterministic tests must be run manually. Import('deterministic_tests all_tests') check_gcomm_nondet = env.Program(target = "check_gcomm_nondet", source = [ "check_gcomm_nondet.cpp", "check_gmcast_nondet.cpp", "check_pc_nondet.cpp", "check_util_nondet.cpp" ]) if not deterministic_tests and all_tests: env.Test("gcomm_check_nondet.passed", check_gcomm_nondet) Clean(check_gcomm_nondet, "#/check_gcomm_nondet.log") ssl_test = env.Program(target = 'ssl_test', source = ['ssl_test.cpp']) galera-4-26.4.25/gcomm/test/check_fair_send_queue.cpp000644 000164 177776 00000012632 15107057155 023600 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2019-2020 Codership Oy // #include "check_gcomm.hpp" #include "fair_send_queue.hpp" #include static gcomm::Datagram make_datagram(char header) { static const char data[1] = { 0 }; gcomm::Datagram ret(gu::SharedBuffer(new gu::Buffer(data, data + 1))); ret.set_header_offset(ret.header_offset() - 1); ret.header()[ret.header_offset()] = header; return ret; } static gu::byte_t get_header(const gcomm::Datagram& dg) { return dg.header()[dg.header_offset()]; } // Test the make_datagram() helper above. 
// Sanity check for the make_datagram() helper: one header byte plus
// one payload byte.
START_TEST(test_datagram)
{
    gcomm::Datagram d(make_datagram(1));
    ck_assert(d.len() == 2);
    ck_assert(get_header(d) == 1);
}
END_TEST

// A single push makes the datagram visible at the front of the queue.
START_TEST(test_push_back)
{
    gcomm::FairSendQueue q;
    gcomm::Datagram d(make_datagram(1));
    q.push_back(0, d);
    ck_assert(q.front().len() == 2);
    ck_assert(get_header(q.front()) == 1);
}
END_TEST

// Two pushes into the same segment keep FIFO order.
START_TEST(test_push_back_same_segments)
{
    gcomm::FairSendQueue q;
    gcomm::Datagram d(make_datagram(1));
    q.push_back(0, d);
    ck_assert(get_header(q.front()) == 1);
    ck_assert(get_header(q.back()) == 1);
    q.push_back(0, make_datagram(2));
    ck_assert(get_header(q.front()) == 1);
    ck_assert(get_header(q.back()) == 2);
}
END_TEST

// Pushes into distinct segments still expose first-in at front,
// last-in at back.
START_TEST(test_push_back_different_segments)
{
    gcomm::FairSendQueue q;
    gcomm::Datagram d(make_datagram(1));
    q.push_back(0, d);
    ck_assert(get_header(q.front()) == 1);
    ck_assert(get_header(q.back()) == 1);
    q.push_back(1, make_datagram(2));
    ck_assert(get_header(q.front()) == 1);
    ck_assert(get_header(q.back()) == 2);
}
END_TEST

// empty() flips to false after the first push.
START_TEST(test_empty)
{
    gcomm::FairSendQueue q;
    ck_assert(q.empty());
    q.push_back(0, make_datagram(1));
    ck_assert(!q.empty());
}
END_TEST

// size() counts datagrams across all segments.
START_TEST(test_size)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    ck_assert(q.size() == 1);
    q.push_back(1, make_datagram(2));
    ck_assert(q.size() == 2);
}
END_TEST

// Popping the only element leaves the queue empty.
START_TEST(test_pop_front)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    ck_assert(q.size() == 1);
    q.pop_front();
    ck_assert(q.size() == 0);
}
END_TEST

// Pop from a single segment advances to the next datagram in it.
START_TEST(test_pop_front_same_segments)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    ck_assert(q.size() == 1);
    q.push_back(0, make_datagram(2));
    ck_assert(q.size() == 2);
    ck_assert(get_header(q.front()) == 1);
    q.pop_front();
    ck_assert(q.size() == 1);
    ck_assert(get_header(q.front()) == 2);
}
END_TEST

// Pop with two segments advances to the other segment's datagram.
START_TEST(test_pop_front_different_segments)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    ck_assert(q.size() == 1);
    q.push_back(1, make_datagram(2));
    ck_assert(q.size() == 2);
    ck_assert(get_header(q.front()) == 1);
    q.pop_front();
    ck_assert(q.size() == 1);
    ck_assert(get_header(q.front()) == 2);
}
END_TEST

// Interleaved pushes into segments 0 and 1 drain strictly in
// insertion order 1, 2, 3, 4.
START_TEST(test_push_pop_interleaving)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    q.push_back(1, make_datagram(2));
    q.push_back(0, make_datagram(3));
    q.push_back(1, make_datagram(4));
    ck_assert(get_header(q.front()) == 1);
    ck_assert(q.size() == 4);
    q.pop_front();
    ck_assert(q.size() == 3);
    ck_assert(get_header(q.front()) == 2);
    q.pop_front();
    ck_assert(q.size() == 2);
    ck_assert(get_header(q.front()) == 3);
    q.pop_front();
    ck_assert(q.size() == 1);
    ck_assert(get_header(q.front()) == 4);
    q.pop_front();
    ck_assert(q.empty());
}
END_TEST

// queued_bytes() tracks total datagram length (2 bytes each) across
// pushes and pops in both segments.
START_TEST(test_queued_bytes)
{
    gcomm::FairSendQueue q;
    q.push_back(0, make_datagram(1));
    ck_assert(q.queued_bytes() == 2);
    q.push_back(1, make_datagram(2));
    ck_assert(q.queued_bytes() == 4);
    q.pop_front();
    ck_assert(q.queued_bytes() == 2);
    q.pop_front();
    ck_assert(q.queued_bytes() == 0);
}
END_TEST

Suite* fair_send_queue_suite()
{
    Suite* ret(suite_create("fair_send_queue"));
    TCase* tc;
    tc = tcase_create("test_datagram");
    tcase_add_test(tc, test_datagram);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_push_back");
    tcase_add_test(tc, test_push_back);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_push_back_same_segments");
    tcase_add_test(tc, test_push_back_same_segments);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_push_back_different_segments");
    tcase_add_test(tc, test_push_back_different_segments);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_empty");
    tcase_add_test(tc, test_empty);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_size");
    tcase_add_test(tc, test_size);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_pop_front");
    tcase_add_test(tc, test_pop_front);
    suite_add_tcase(ret, tc);
    tc = tcase_create("test_pop_front_same_segments");
tcase_add_test(tc, test_pop_front_same_segments); suite_add_tcase(ret, tc); tc = tcase_create("test_pop_front_different_segments"); tcase_add_test(tc, test_pop_front_different_segments); suite_add_tcase(ret, tc); tc = tcase_create("test_push_pop_interleaving"); tcase_add_test(tc, test_push_pop_interleaving); suite_add_tcase(ret, tc); tc = tcase_create("test_queued_bytes"); tcase_add_test(tc, test_queued_bytes); suite_add_tcase(ret, tc); return ret; } galera-4-26.4.25/gcomm/test/check_types.cpp000644 000164 177776 00000004522 15107057155 021605 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "check_gcomm.hpp" #include "gcomm/view.hpp" #include "gcomm/types.hpp" #include "gcomm/map.hpp" #include #include #include using std::pair; using std::make_pair; using std::string; #include "check_templ.hpp" #include using namespace gcomm; START_TEST(test_uuid) { UUID uuid; ck_assert_msg(uuid.full_str() == "00000000-0000-0000-0000-000000000000", "%s", uuid.full_str().c_str()); for (size_t i = 0; i < 159; ++i) { UUID uuidrnd(0, 0); log_debug << uuidrnd; } UUID uuid1(0, 0); UUID uuid2(0, 0); ck_assert(uuid1 < uuid2); // Verify that the first 8 chars of the short UUID notation matches // with first 8 chars of full uuid string. 
std::string full(uuid1.full_str()); std::ostringstream os; os << uuid1; ck_assert_msg(full.compare(0, 8, os.str().substr(0, 8)) == 0, "%s != %s", full.c_str(), os.str().c_str()); } END_TEST START_TEST(test_view) { const UUID uuid1(1); const UUID uuid2(2); const UUID uuid3(3); // View id ordering: // 1) view seq less than // 2) uuid newer than (higher timestamp, greater leading bytes) // 3) view type (reg, trans, non-prim, prim) ViewId v1(V_REG, uuid2, 1); ViewId v2(V_REG, uuid1, 1); ViewId v3(V_TRANS, uuid1, 1); ViewId v4(V_TRANS, uuid3, 2); ViewId v5(V_REG, uuid2, 2); ViewId v6(V_REG, uuid1, 2); ViewId v7(V_TRANS, uuid1, 2); ck_assert(v1 < v2); ck_assert(v2 < v3); ck_assert(v3 < v4); ck_assert(v4 < v5); ck_assert(v5 < v6); ck_assert(v6 < v7); ViewId vid; ck_assert(vid.uuid() == UUID()); ck_assert(vid.seq() == 0); UUID uuid(0, 0); vid = ViewId(V_REG, uuid, 7); ck_assert(vid.uuid() == uuid); ck_assert(vid.seq() == 7); NodeList nl; for (size_t i = 0; i < 7; ++i) { nl.insert(make_pair(UUID(0, 0), Node(0))); } ck_assert(nl.size() == 7); } END_TEST Suite* types_suite() { Suite* s = suite_create("types"); TCase* tc; tc = tcase_create("test_uuid"); tcase_add_test(tc, test_uuid); suite_add_tcase(s, tc); tc = tcase_create("test_view"); tcase_add_test(tc, test_view); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_gcomm.hpp000644 000164 177776 00000002060 15107057155 021543 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009 Codership Oy */ #ifndef CHECK_GCOMM_HPP #define CHECK_GCOMM_HPP struct Suite; /* Tests for various common types */ Suite* types_suite(); /* Tests for utilities */ Suite* util_suite(); /* Util nondeterministic tests */ Suite* util_nondet_suite(); /* Tests for logger */ Suite* logger_suite(); /* Tests for message buffer implementations */ Suite* buffer_suite(); /* Tests for event loop */ Suite* event_suite(); /* Tests for concurrency handling (mutex, cond, thread, etc.)*/ Suite* concurrent_suite(); /* Tests for TCP transport 
*/ Suite* tcp_suite(); /* Tests for GMcast transport */ Suite* gmcast_suite(); /* Tests for GMcast nondeterministic transport */ Suite* gmcast_nondet_suite(); /* Tests for EVS transport */ Suite* evs_suite(); /* Better evs suite */ Suite* evs2_suite(); /* Tests for VS trasport */ Suite* vs_suite(); /* Tests for PC transport */ Suite* pc_suite(); /* PC nondeterministic tests */ Suite* pc_nondet_suite(); /* Fair send queue suite */ Suite* fair_send_queue_suite(); #endif // CHECK_GCOMM_HPP galera-4-26.4.25/gcomm/test/check_gmcast_nondet.cpp000644 000164 177776 00000032471 15107057155 023272 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2020 Codership Oy */ #include "check_gcomm.hpp" #include "gcomm/protostack.hpp" #include "gcomm/conf.hpp" #include "gmcast.hpp" #include "gmcast_message.hpp" #include "gu_asio.hpp" // gu::ssl_register_params() using namespace std; using namespace gcomm; using namespace gcomm::gmcast; using namespace gu::datetime; using gu::byte_t; using gu::Buffer; #include // Note: Multicast test(s) not run by default. static bool test_multicast(false); string mcast_param("gmcast.mcast_addr=239.192.0.11&gmcast.mcast_port=4567"); START_TEST(test_gmcast_multicast) { string uri1("gmcast://?gmcast.group=test&gmcast.mcast_addr=239.192.0.11"); gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr pnet(Protonet::create(conf)); Transport* gm1(Transport::create(*pnet, uri1)); gm1->connect(); gm1->close(); delete gm1; } END_TEST START_TEST(test_gmcast_w_user_messages) { class User : public Toplay { Transport* tp_; size_t recvd_; Protostack pstack_; explicit User(const User&); void operator=(User&); public: User(Protonet& pnet, const std::string& listen_addr, const std::string& remote_addr) : Toplay(pnet.conf()), tp_(0), recvd_(0), pstack_() { string uri("gmcast://"); uri += remote_addr; // != 0 ? 
remote_addr : ""; uri += "?"; uri += "tcp.non_blocking=1"; uri += "&"; uri += "gmcast.group=testgrp"; uri += "&gmcast.time_wait=PT0.5S"; if (test_multicast == true) { uri += "&" + mcast_param; } uri += "&gmcast.listen_addr=tcp://"; uri += listen_addr; tp_ = Transport::create(pnet, uri); } ~User() { delete tp_; } void start(const std::string& peer = "") { if (peer == "") { tp_->connect(); } else { tp_->connect(peer); } pstack_.push_proto(tp_); pstack_.push_proto(this); } void stop() { pstack_.pop_proto(this); pstack_.pop_proto(tp_); tp_->close(); } void handle_timer() { byte_t buf[16]; memset(buf, 0xa5, sizeof(buf)); Datagram dg(Buffer(buf, buf + sizeof(buf))); send_down(dg, ProtoDownMeta()); } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (rb.len() < rb.offset() + 16) { gu_throw_fatal << "offset error"; } char buf[16]; memset(buf, 0xa5, sizeof(buf)); // cppcheck-suppress uninitstring if (memcmp(buf, &rb.payload()[0] + rb.offset(), 16) != 0) { gu_throw_fatal << "content mismatch"; } recvd_++; } size_t recvd() const { return recvd_; } void set_recvd(size_t val) { recvd_ = val; } Protostack& pstack() { return pstack_; } std::string listen_addr() const { return tp_->listen_addr(); } }; log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); mark_point(); unique_ptr pnet(Protonet::create(conf)); mark_point(); User u1(*pnet, "127.0.0.1:0", ""); pnet->insert(&u1.pstack()); log_info << "u1 start"; u1.start(); pnet->event_loop(Sec/10); ck_assert(u1.recvd() == 0); log_info << "u2 start"; User u2(*pnet, "127.0.0.1:0", u1.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u2.pstack()); u2.start(); while (u1.recvd() <= 50 || u2.recvd() <= 50) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u3 start"; User u3(*pnet, "127.0.0.1:0", u2.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u3.pstack()); u3.start(); while (u3.recvd() <= 50) { 
u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u4 start"; User u4(*pnet, "127.0.0.1:0", u2.listen_addr().erase(0, strlen("tcp://"))); pnet->insert(&u4.pstack()); u4.start(); while (u4.recvd() <= 50) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } log_info << "u1 stop"; u1.stop(); pnet->erase(&u1.pstack()); pnet->event_loop(3*Sec); log_info << "u1 start"; pnet->insert(&u1.pstack()); u1.start(u2.listen_addr()); u1.set_recvd(0); u2.set_recvd(0); u3.set_recvd(0); u4.set_recvd(0); for (size_t i(0); i < 30; ++i) { u1.handle_timer(); u2.handle_timer(); pnet->event_loop(Sec/10); } ck_assert(u1.recvd() != 0); ck_assert(u2.recvd() != 0); ck_assert(u3.recvd() != 0); ck_assert(u4.recvd() != 0); pnet->erase(&u4.pstack()); pnet->erase(&u3.pstack()); pnet->erase(&u2.pstack()); pnet->erase(&u1.pstack()); u1.stop(); u2.stop(); u3.stop(); u4.stop(); pnet->event_loop(0); } END_TEST // not run by default, hard coded port START_TEST(test_gmcast_auto_addr) { log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr pnet(Protonet::create(conf)); Transport* tp1 = Transport::create(*pnet, "gmcast://?gmcast.group=test"); Transport* tp2 = Transport::create(*pnet, "gmcast://127.0.0.1:4567" "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:10002"); pnet->insert(&tp1->pstack()); pnet->insert(&tp2->pstack()); tp1->connect(); tp2->connect(); pnet->event_loop(Sec); pnet->erase(&tp2->pstack()); pnet->erase(&tp1->pstack()); tp1->close(); tp2->close(); delete tp1; delete tp2; pnet->event_loop(0); } END_TEST START_TEST(test_gmcast_forget) { gu_conf_self_tstamp_on(); log_info << "START"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); unique_ptr pnet(Protonet::create(conf)); Transport* tp1 = Transport::create(*pnet, "gmcast://" "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); pnet->insert(&tp1->pstack()); tp1->connect(); Transport* tp2 = 
Transport::create(*pnet, std::string("gmcast://") + tp1->listen_addr().erase( 0, strlen("tcp://")) + "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); Transport* tp3 = Transport::create(*pnet, std::string("gmcast://") + tp1->listen_addr().erase( 0, strlen("tcp://")) + "?gmcast.group=test&gmcast.listen_addr=tcp://127.0.0.1:0"); pnet->insert(&tp2->pstack()); pnet->insert(&tp3->pstack()); tp2->connect(); tp3->connect(); pnet->event_loop(Sec); UUID uuid1 = tp1->uuid(); tp1->close(); tp2->close(uuid1); tp3->close(uuid1); pnet->event_loop(10*Sec); tp1->connect(); // @todo Implement this using User class above and verify that // tp2 and tp3 communicate with each other but now with tp1 log_info << "####"; pnet->event_loop(Sec); pnet->erase(&tp3->pstack()); pnet->erase(&tp2->pstack()); pnet->erase(&tp1->pstack()); tp1->close(); tp2->close(); tp3->close(); delete tp1; delete tp2; delete tp3; pnet->event_loop(0); } END_TEST // not run by default, hard coded port START_TEST(test_trac_380) { gu_conf_self_tstamp_on(); log_info << "START (test_trac_380)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); std::unique_ptr pnet(gcomm::Protonet::create(conf)); // caused either assertion or exception gcomm::Transport* tp1(gcomm::Transport::create( *pnet, "gmcast://127.0.0.1:4567?" "gmcast.group=test")); pnet->insert(&tp1->pstack()); tp1->connect(); try { pnet->event_loop(Sec); } catch (gu::Exception& e) { ck_assert_msg(e.get_errno() == EINVAL, "unexpected errno: %d, cause %s", e.get_errno(), e.what()); } pnet->erase(&tp1->pstack()); tp1->close(); delete tp1; pnet->event_loop(0); } END_TEST START_TEST(test_trac_828) { gu_conf_self_tstamp_on(); log_info << "START (test_trac_828)"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); std::unique_ptr pnet(gcomm::Protonet::create(conf)); // If the bug is present, this will throw because of own address being // in address list. 
try { Transport* tp(gcomm::Transport::create( *pnet, "gmcast://127.0.0.1:4567?" "gmcast.group=test&" "gmcast.listen_addr=tcp://127.0.0.1:4567")); delete tp; } catch (gu::Exception& e) { ck_abort_msg("test_trac_828, expcetion thrown because of having own " "address in address list"); } } END_TEST START_TEST(test_gmcast_ipv6) { log_info << "START test_gmcast_ipv6"; gu::Config conf; gu::ssl_register_params(conf); gcomm::Conf::register_params(conf); conf.set("base_host", "ip6-localhost"); gu_log_max_level = GU_LOG_DEBUG; std::unique_ptr pnet(gcomm::Protonet::create(conf)); // Without scheme { std::unique_ptr tp(gcomm::Transport::create( *pnet, "gmcast://[::1]:4567?" "gmcast.group=test&" "gmcast.listen_addr=tcp://[::1]:4567")); tp->connect(); tp->close(); } { std::unique_ptr tp(gcomm::Transport::create( *pnet, "gmcast://ip6-localhost:4567?" "gmcast.group=test&" "gmcast.listen_addr=tcp://ip6-localhost:4567")); tp->connect(); tp->close(); } { std::unique_ptr tp(gcomm::Transport::create( *pnet, "gmcast://[::1]?" "gmcast.group=test&" "gmcast.listen_addr=tcp://[::1]")); tp->connect(); tp->close(); } { std::unique_ptr tp(gcomm::Transport::create( *pnet, "gmcast://ip6-localhost?" "gmcast.group=test&" "gmcast.listen_addr=tcp://ip6-localhost")); tp->connect(); tp->close(); } { gcomm::Protolay::sync_param_cb_t spcb; std::unique_ptr tp(gcomm::Transport::create( *pnet, "gmcast://ip6-localhost?" 
"gmcast.group=test&" "gmcast.listen_addr=tcp://[2001:db8:10:9464::233]:4567")); log_info << tp->configured_listen_addr(); log_info << conf; ck_assert(tp->configured_listen_addr() == "tcp://[2001:db8:10:9464::233]:4567"); } log_info << "END test_gmcast_ipv6"; } END_TEST Suite* gmcast_nondet_suite() { Suite* s = suite_create("gmcast_nondet"); TCase* tc; if (test_multicast == true) { tc = tcase_create("test_gmcast_multicast"); tcase_add_test(tc, test_gmcast_multicast); suite_add_tcase(s, tc); } tc = tcase_create("test_gmcast_w_user_messages"); tcase_add_test(tc, test_gmcast_w_user_messages); tcase_set_timeout(tc, 30); suite_add_tcase(s, tc); // not run by default, hard coded port tc = tcase_create("test_gmcast_auto_addr"); tcase_add_test(tc, test_gmcast_auto_addr); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_forget"); tcase_add_test(tc, test_gmcast_forget); tcase_set_timeout(tc, 20); suite_add_tcase(s, tc); // not run by default, hard coded port tc = tcase_create("test_trac_380"); tcase_add_test(tc, test_trac_380); suite_add_tcase(s, tc); tc = tcase_create("test_trac_828"); tcase_add_test(tc, test_trac_828); suite_add_tcase(s, tc); tc = tcase_create("test_gmcast_ipv6"); tcase_add_test(tc, test_gmcast_ipv6); suite_add_tcase(s, tc); return s; } galera-4-26.4.25/gcomm/test/check_trace.hpp000644 000164 177776 00000040325 15107057155 021545 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2023 Codership Oy * * $Id$ */ /*! 
* Classes for tracing views and messages */ #include "gu_uri.hpp" #include "gu_datetime.hpp" #include "gcomm/datagram.hpp" #include "gcomm/uuid.hpp" #include "gcomm/protolay.hpp" #include "gcomm/protostack.hpp" #include "gcomm/transport.hpp" #include "gcomm/map.hpp" #include "gcomm/util.hpp" #include #include #include gu::Config& check_trace_conf(); extern "C" void check_trace_log_cb(int, const char*); namespace gcomm { class TraceMsg { public: TraceMsg(const UUID& source = UUID::nil(), const ViewId& source_view_id = ViewId(), const int64_t seq = -1) : source_(source), source_view_id_(source_view_id), seq_(seq) { } const UUID& source() const { return source_; } const ViewId& source_view_id() const { return source_view_id_; } int64_t seq() const { return seq_; } bool operator==(const TraceMsg& cmp) const { return (source_ == cmp.source_ && source_view_id_ == cmp.source_view_id_ && seq_ == cmp.seq_ ); } private: UUID source_; ViewId source_view_id_; int64_t seq_; }; std::ostream& operator<<(std::ostream& os, const TraceMsg& msg); class ViewTrace { public: ViewTrace(const View& view) : view_(view), msgs_() { } void insert_msg(const TraceMsg& msg) { switch (view_.type()) { case V_REG: gcomm_assert(view_.id() == msg.source_view_id()); gcomm_assert(contains(msg.source()) == true) << "msg source " << msg.source() << " not int view " << view_; break; case V_TRANS: gcomm_assert(view_.id().uuid() == msg.source_view_id().uuid() && view_.id().seq() == msg.source_view_id().seq()); break; case V_NON_PRIM: break; case V_PRIM: gcomm_assert(view_.id() == msg.source_view_id()) << " view id " << view_.id() << " source view " << msg.source_view_id(); gcomm_assert(contains(msg.source()) == true); break; case V_NONE: gu_throw_fatal; break; } if (view_.type() != V_NON_PRIM) { msgs_.push_back(msg); } } const View& view() const { return view_; } const std::deque& msgs() const { return msgs_; } bool operator==(const ViewTrace& cmp) const { // Note: Cannot compare joining members since seen 
differently // on different merging subsets return (view_.members() == cmp.view_.members() && view_.left() == cmp.view_.left() && view_.partitioned() == cmp.view_.partitioned() && msgs_ == cmp.msgs_ ); } private: bool contains(const UUID& uuid) const { return (view_.members().find(uuid) != view_.members().end() || view_.left().find(uuid) != view_.left().end() || view_.partitioned().find(uuid) !=view_.partitioned().end()); } View view_; std::deque msgs_; }; std::ostream& operator<<(std::ostream& os, const ViewTrace& vtr); class Trace { public: class ViewTraceMap : public Map { }; Trace() : views_(), current_view_(views_.end()) { } void insert_view(const View& view) { gu_trace(current_view_ = views_.insert_unique( std::make_pair(view.id(), ViewTrace(view)))); log_debug << view; } void insert_msg(const TraceMsg& msg) { gcomm_assert(current_view_ != views_.end()) << "no view set before msg delivery"; gu_trace(ViewTraceMap::value(current_view_).insert_msg(msg)); } const ViewTraceMap& view_traces() const { return views_; } const ViewTrace& current_view_trace() const { gcomm_assert(current_view_ != views_.end()); return ViewTraceMap::value(current_view_); } private: ViewTraceMap views_; ViewTraceMap::iterator current_view_; }; std::ostream& operator<<(std::ostream& os, const Trace& tr); class DummyTransport : public Transport { UUID uuid_; std::deque out_; bool queue_; static std::unique_ptr net_; static Protonet& get_net(); public: DummyTransport(const UUID& uuid = UUID::nil(), bool queue = true, const gu::URI& uri = gu::URI("dummy:")) : Transport(get_net(), uri), uuid_(uuid), out_(), queue_(queue) {} ~DummyTransport() { out_.clear(); } const UUID& uuid() const { return uuid_; } size_t mtu() const { return (1U << 31); } void connect(bool first) { } void close(bool force) { } void close(const UUID&) { } void connect() { } void listen() { gu_throw_fatal << "not implemented"; } Transport *accept() { gu_throw_fatal << "not implemented"; return 0; } void handle_up(const void* 
cid, const Datagram& rb, const ProtoUpMeta& um) { send_up(rb, um); } void set_queueing(bool val) { queue_ = val; } int handle_down(Datagram& wb, const ProtoDownMeta& dm) { if (queue_ == true) { // assert(wb.header().size() == 0); out_.push_back(new Datagram(wb)); return 0; } else { gu_trace(return send_down(wb, ProtoDownMeta(0xff, O_UNRELIABLE, uuid_))); } } bool empty() const { return out_.empty(); } Datagram* out() { if (out_.empty()) { return 0; } Datagram* rb = out_.front(); out_.pop_front(); return rb; } }; class DummyNode : public Toplay { public: DummyNode(gu::Config& conf, const size_t index, const gcomm::UUID& uuid, const std::list& protos) : Toplay (conf), index_ (index), uuid_ (uuid), protos_ (protos), cvi_ (), tr_ (), curr_seq_(0) { gcomm_assert(protos_.empty() == false); std::list::iterator i, i_next; i = i_next = protos_.begin(); for (++i_next; i_next != protos_.end(); ++i, ++i_next) { gu_trace(gcomm::connect(*i, *i_next)); } gu_trace(gcomm::connect(*i, this)); } ~DummyNode() { try { std::list::iterator i, i_next; i = i_next = protos_.begin(); for (++i_next; i_next != protos_.end(); ++i, ++i_next) { gu_trace(gcomm::disconnect(*i, *i_next)); } gu_trace(gcomm::disconnect(*i, this)); std::for_each(protos_.begin(), protos_.end(), gu::DeleteObject()); } catch(std::exception& e) { log_fatal << e.what(); abort(); } } const UUID& uuid() const { return uuid_; } std::list& protos() { return protos_; } size_t index() const { return index_; } void connect(bool first) { gu_trace(std::for_each(protos_.rbegin(), protos_.rend(), [first](Protolay* pl) { pl->connect(first); })); ; } void close(bool force = false) { for (std::list::iterator i = protos_.begin(); i != protos_.end(); ++i) { (*i)->close(); } } void close(const UUID& uuid) { for (std::list::iterator i = protos_.begin(); i != protos_.end(); ++i) { (*i)->close(uuid); } } void send() { const int64_t seq(curr_seq_); gu::byte_t buf[sizeof(seq)]; size_t sz; gu_trace(sz = gu::serialize8(seq, buf, sizeof(buf), 0)); 
Datagram dg(gu::Buffer(buf, buf + sz)); int err = send_down(dg, ProtoDownMeta(0)); if (err != 0) { log_debug << "failed to send: " << strerror(err); } else { ++curr_seq_; } } Datagram create_datagram() { const int64_t seq(curr_seq_); gu::byte_t buf[sizeof(seq)]; size_t sz; gu_trace(sz = gu::serialize8(seq, buf, sizeof(buf), 0)); return Datagram (gu::Buffer(buf, buf + sz)); } const Trace& trace() const { return tr_; } void set_cvi(const ViewId& vi) { log_debug << uuid() << " setting cvi to " << vi; cvi_ = vi; } bool in_cvi() const { for (Trace::ViewTraceMap::const_reverse_iterator i( tr_.view_traces().rbegin()); i != tr_.view_traces().rend(); ++i) { if (i->first.uuid() == cvi_.uuid() && i->first.type() == cvi_.type() && i->first.seq() >= cvi_.seq()) { return true; } } return false; } void handle_up(const void* cid, const Datagram& rb, const ProtoUpMeta& um) { if (rb.len() != 0) { gcomm_assert((um.source() == UUID::nil()) == false); // assert(rb.header().size() == 0); const gu::byte_t* begin(gcomm::begin(rb)); const size_t available(gcomm::available(rb)); // log_debug << um.source() << " " << uuid() // << " " << available ; // log_debug << rb.len() << " " << rb.offset() << " " // << rb.header_len(); if (available != 8) { log_info << "check_trace fail: " << available; } gcomm_assert(available == 8); int64_t seq; gu_trace(gu::unserialize8(begin, available, 0, seq)); tr_.insert_msg(TraceMsg(um.source(), um.source_view_id(), seq)); } else { gcomm_assert(um.has_view() == true); tr_.insert_view(um.view()); } } gu::datetime::Date handle_timers() { std::for_each(protos_.begin(), protos_.end(), [](Protolay* pl) { pl->handle_timers(); }); return gu::datetime::Date::max(); } private: size_t index_; UUID uuid_; std::list protos_; ViewId cvi_; Trace tr_; int64_t curr_seq_; }; class ChannelMsg { public: ChannelMsg(const Datagram& rb, const UUID& source) : rb_(rb), source_(source) { } const Datagram& rb() const { return rb_; } const UUID& source() const { return source_; } private: 
Datagram rb_; UUID source_; }; class Channel : public Bottomlay { public: Channel(gu::Config& conf, const size_t ttl = 1, const size_t latency = 1, const double loss = 1.) : Bottomlay(conf), ttl_(ttl), latency_(latency), loss_(loss), queue_() { } ~Channel() { } int handle_down(Datagram& wb, const ProtoDownMeta& dm) { gcomm_assert((dm.source() == UUID::nil()) == false); gu_trace(put(wb, dm.source())); return 0; } void put(const Datagram& rb, const UUID& source); ChannelMsg get(); void set_ttl(const size_t t) { ttl_ = t; } size_t ttl() const { return ttl_; } void set_latency(const size_t l) { gcomm_assert(l > 0); latency_ = l; } size_t latency() const { return latency_; } void set_loss(const double l) { loss_ = l; } double loss() const { return loss_; } size_t n_msgs() const { return queue_.size(); } private: size_t ttl_; size_t latency_; double loss_; std::deque > queue_; }; std::ostream& operator<<(std::ostream& os, const Channel& ch); std::ostream& operator<<(std::ostream& os, const Channel* ch); class MatrixElem { public: MatrixElem(const size_t ii, const size_t jj) : ii_(ii), jj_(jj) { } size_t ii() const { return ii_; } size_t jj() const { return jj_; } bool operator<(const MatrixElem& cmp) const { return (ii_ < cmp.ii_ || (ii_ == cmp.ii_ && jj_ < cmp.jj_)); } private: size_t ii_; size_t jj_; }; std::ostream& operator<<(std::ostream& os, const MatrixElem& me); class ChannelMap : public Map { public: struct DeleteObject { void operator()(ChannelMap::value_type& vt) { delete ChannelMap::value(vt); } }; }; class NodeMap : public Map { public: struct DeleteObject { void operator()(NodeMap::value_type& vt) { delete NodeMap::value(vt); } }; }; class PropagationMatrix { public: PropagationMatrix() : tp_(), prop_() { // Some tests which deal with timer expiration require that // the current time is far enough from zero. Start from // 100 secs after zero, this should give enough headroom // for all tests. 
gu::datetime::SimClock::init(100*gu::datetime::Sec); // Uncomment this to get logs with simulated timestamps. // The low will be written into stderr. // gu_log_cb = check_trace_log_cb; } ~PropagationMatrix(); void insert_tp(DummyNode* t); void set_latency(const size_t ii, const size_t jj, const size_t lat); void set_loss(const size_t ii, const size_t jj, const double loss); void split(const size_t ii, const size_t jj); void merge(const size_t ii, const size_t jj, const double loss = 1.0); void propagate_n(size_t n); void propagate_until_empty(); void propagate_until_cvi(bool handle_timers); friend std::ostream& operator<<(std::ostream&,const PropagationMatrix&); private: void expire_timers(); size_t count_channel_msgs() const; bool all_in_cvi() const; NodeMap tp_; ChannelMap prop_; }; std::ostream& operator<<(std::ostream& os, const PropagationMatrix& prop); // Cross check traces from vector of dummy nodes void check_trace(const std::vector& nvec); } // namespace gcomm galera-4-26.4.25/gcomm/SConscript000644 000164 177776 00000000141 15107057155 017624 0ustar00jenkinsnogroup000000 000000 # SCons build script for building gcomm SConscript(Split('''src/SConscript test/SConscript''')) galera-4-26.4.25/gcs/000755 000164 177776 00000000000 15107057160 015264 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcs/src/000755 000164 177776 00000000000 15107057160 016053 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcs/src/gcs_params.cpp000644 000164 177776 00000022544 15107057155 020711 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2017 Codership Oy * * $Id$ */ #include "gcs_params.hpp" #include "gcs_fc.hpp" // gcs_fc_hard_limit_fix #include "gu_inttypes.hpp" #include "gu_config.hpp" // gu::Config::Flag #include const char* const GCS_PARAMS_FC_FACTOR = "gcs.fc_factor"; const char* const GCS_PARAMS_FC_LIMIT = "gcs.fc_limit"; const char* const GCS_PARAMS_FC_MASTER_SLAVE = "gcs.fc_master_slave"; const char* const GCS_PARAMS_FC_SINGLE_PRIMARY = 
"gcs.fc_single_primary"; const char* const GCS_PARAMS_FC_DEBUG = "gcs.fc_debug"; const char* const GCS_PARAMS_SYNC_DONOR = "gcs.sync_donor"; const char* const GCS_PARAMS_MAX_PKT_SIZE = "gcs.max_packet_size"; const char* const GCS_PARAMS_RECV_Q_HARD_LIMIT = "gcs.recv_q_hard_limit"; const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT = "gcs.recv_q_soft_limit"; const char* const GCS_PARAMS_MAX_THROTTLE = "gcs.max_throttle"; #ifdef GCS_SM_DEBUG const char* const GCS_PARAMS_SM_DUMP = "gcs.sm_dump"; #endif /* GCS_SM_DEBUG */ static const char* const GCS_PARAMS_FC_FACTOR_DEFAULT = "1.0"; static const char* const GCS_PARAMS_FC_LIMIT_DEFAULT = "16"; static const char* const GCS_PARAMS_FC_MASTER_SLAVE_DEFAULT = "no"; static const char* const GCS_PARAMS_FC_SINGLE_PRIMARY_DEFAULT = "no"; static const char* const GCS_PARAMS_FC_DEBUG_DEFAULT = "0"; static const char* const GCS_PARAMS_SYNC_DONOR_DEFAULT = "no"; static const char* const GCS_PARAMS_MAX_PKT_SIZE_DEFAULT = "64500"; static ssize_t const GCS_PARAMS_RECV_Q_HARD_LIMIT_DEFAULT = SSIZE_MAX; static const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT_DEFAULT = "0.25"; static const char* const GCS_PARAMS_MAX_THROTTLE_DEFAULT = "0.25"; static bool gcs_params_register(gu_config_t* const conf) { bool ret = 0; ret |= gu_config_add (conf, GCS_PARAMS_FC_FACTOR, GCS_PARAMS_FC_FACTOR_DEFAULT, gu::Config::Flag::type_double); ret |= gu_config_add (conf, GCS_PARAMS_FC_LIMIT, GCS_PARAMS_FC_LIMIT_DEFAULT, gu::Config::Flag::type_integer); ret |= gu_config_add (conf, GCS_PARAMS_FC_MASTER_SLAVE, GCS_PARAMS_FC_MASTER_SLAVE_DEFAULT, gu::Config::Flag::deprecated | gu::Config::Flag::type_bool); ret |= gu_config_add (conf, GCS_PARAMS_FC_SINGLE_PRIMARY, GCS_PARAMS_FC_SINGLE_PRIMARY_DEFAULT, gu::Config::Flag::read_only | gu::Config::Flag::type_bool); ret |= gu_config_add (conf, GCS_PARAMS_FC_DEBUG, GCS_PARAMS_FC_DEBUG_DEFAULT, gu::Config::Flag::type_integer); ret |= gu_config_add (conf, GCS_PARAMS_SYNC_DONOR, GCS_PARAMS_SYNC_DONOR_DEFAULT, 
gu::Config::Flag::type_bool); ret |= gu_config_add (conf, GCS_PARAMS_MAX_PKT_SIZE, GCS_PARAMS_MAX_PKT_SIZE_DEFAULT, gu::Config::Flag::type_integer); char tmp[32] = { 0, }; snprintf (tmp, sizeof(tmp) - 1, "%lld", (long long)GCS_PARAMS_RECV_Q_HARD_LIMIT_DEFAULT); ret |= gu_config_add (conf, GCS_PARAMS_RECV_Q_HARD_LIMIT, tmp, gu::Config::Flag::type_integer); ret |= gu_config_add (conf, GCS_PARAMS_RECV_Q_SOFT_LIMIT, GCS_PARAMS_RECV_Q_SOFT_LIMIT_DEFAULT, gu::Config::Flag::type_double); ret |= gu_config_add (conf, GCS_PARAMS_MAX_THROTTLE, GCS_PARAMS_MAX_THROTTLE_DEFAULT, gu::Config::Flag::type_double); #ifdef GCS_SM_DEBUG ret |= gu_config_add (conf, GCS_PARAMS_SM_DUMP, "0", 0); #endif /* GCS_SM_DEBUG */ return ret; } void gcs_params::register_params(gu::Config& conf) { if (gcs_params_register(reinterpret_cast(&conf))) { gu_throw_fatal << "Failed to register GCS parameters"; } } static long params_init_bool (gu_config_t* conf, const char* const name, bool* const var) { bool val; long rc = gu_config_get_bool(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else if (rc > 0) { assert(0); val = false; rc = -EINVAL; } *var = val; return rc; } static long params_init_long (gu_config_t* conf, const char* const name, long min_val, long max_val, long* const var) { int64_t val; long rc = gu_config_get_int64(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if (max_val == min_val) { max_val = LONG_MAX; min_val = LONG_MIN; } if (val < min_val || val > max_val) { gu_error ("%s value out of range [%ld, %ld]: %" PRIi64, name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } static long params_init_int64 (gu_config_t* conf, const char* const name, int64_t const min_val, int64_t const max_val, int64_t* const var) { int64_t val; long rc = gu_config_get_int64(conf, name, &val); if (rc < 0) { /* Cannot parse 
parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if ((min_val != max_val) && (val < min_val || val > max_val)) { gu_error ("%s value out of range [%" PRIi64 ", %" PRIi64 "]: %" PRIi64, name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } static long params_init_double (gu_config_t* conf, const char* const name, double const min_val, double const max_val, double* const var) { double val; long rc = gu_config_get_double(conf, name, &val); if (rc < 0) { /* Cannot parse parameter value */ gu_error ("Bad %s value", name); return rc; } else { /* Found parameter value */ if ((min_val != max_val) && (val < min_val || val > max_val)) { gu_error ("%s value out of range [%f, %f]: %f", name, min_val, max_val, val); return -EINVAL; } } *var = val; return 0; } static void deprecation_warning(gu_config_t* config, const char* deprecated, const char* current) { if (gu_config_is_set(config, deprecated)) { gu_warn("Option '%s' is deprecated and will be removed in the " "future versions, please use '%s' instead. 
", deprecated, current); } } static int gcs_params_init (struct gcs_params* const params, gu_config_t* const config) { int ret; if ((ret = params_init_long (config, GCS_PARAMS_FC_LIMIT, 0, LONG_MAX, ¶ms->fc_base_limit))) return ret; if ((ret = params_init_long (config, GCS_PARAMS_FC_DEBUG, 0, LONG_MAX, ¶ms->fc_debug))) return ret; if ((ret = params_init_long (config, GCS_PARAMS_MAX_PKT_SIZE, 0, LONG_MAX, ¶ms->max_packet_size))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_FC_FACTOR, 0.0, 1.0, ¶ms->fc_resume_factor))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_RECV_Q_SOFT_LIMIT, 0.0, 1.0 - 1.e-9, ¶ms->recv_q_soft_limit))) return ret; if ((ret = params_init_double (config, GCS_PARAMS_MAX_THROTTLE, 0.0, 1.0 - 1.e-9, ¶ms->max_throttle))) return ret; int64_t tmp; if ((ret = params_init_int64 (config, GCS_PARAMS_RECV_Q_HARD_LIMIT, 0, 0, &tmp))) return ret; params->recv_q_hard_limit = tmp * gcs_fc_hard_limit_fix; // allow for some meta overhead if ((ret = params_init_bool (config, GCS_PARAMS_FC_MASTER_SLAVE, ¶ms->fc_single_primary))) return ret; if (params->fc_single_primary) { // if GCS_PARAMS_FC_MASTER_SLAVE was used, log deprecation warning deprecation_warning(config, GCS_PARAMS_FC_MASTER_SLAVE, GCS_PARAMS_FC_SINGLE_PRIMARY); } else { // Overrides deprecated GCS_PARAMS_FC_MASTER_SLAVE if set if ((ret = params_init_bool (config, GCS_PARAMS_FC_SINGLE_PRIMARY, ¶ms->fc_single_primary))) return ret; } if ((ret = params_init_bool (config, GCS_PARAMS_SYNC_DONOR, ¶ms->sync_donor))) return ret; return 0; } gcs_params::gcs_params(gu::Config& conf) : fc_resume_factor(), recv_q_soft_limit(), max_throttle(), recv_q_hard_limit(), fc_base_limit(), max_packet_size(), fc_debug(), fc_single_primary(), sync_donor() { int const ret(gcs_params_init(this, reinterpret_cast(&conf))); if (0 != ret) { gu_throw_error(-ret); } } galera-4-26.4.25/gcs/src/gcs_fc.hpp000644 000164 177776 00000003676 15107057155 020030 0ustar00jenkinsnogroup000000 000000 /* * 
Copyright (C) 2010 Codership Oy * * $Id$ */ /*! @file This unit contains Flow Control parts deemed worthy to be * taken out of gcs.c */ #ifndef _gcs_fc_h_ #define _gcs_fc_h_ #include #include #include typedef struct gcs_fc { ssize_t hard_limit; // hard limit for slave queue size ssize_t soft_limit; // soft limit for slave queue size, after it FC kicks in double max_throttle; // limit on how much we can throttle replication ssize_t init_size; // initial queue size ssize_t size; // current queue size ssize_t last_sleep; // queue size when last sleep happened ssize_t act_count; // action count double max_rate; // normal replication data rate (byte/s) double scale; // data rate scale factor double offset; // data rate offset (rate = scale*size + offset) long long start; // beginning of the time interval (nanosec, monotonic) long debug; // how often to print debug messages, 0 - never ssize_t sleep_count; double sleeps; } gcs_fc_t; extern double const gcs_fc_hard_limit_fix; //! allow for some overhead /*! Initializes operational constants before oprning connection to group */ extern int gcs_fc_init (gcs_fc_t* fc, ssize_t hard_limit, // hard limit double soft_limit, // soft limit as a fraction of hard limit double max_throttle); /*! Reinitializes object at the beginning of state transfer */ extern void gcs_fc_reset (gcs_fc_t* fc, ssize_t queue_size); /*! Processes a new action added to a slave queue. * @return nanoseconds to sleep or -ENOMEM in case of hitting * hard limit or GU_TIME_ETERNITY to pause forever */ extern long long gcs_fc_process (gcs_fc_t* fc, ssize_t act_size); /*! Print debug info every debug_level'th call to gcs_fc_process. 
*/ extern void gcs_fc_debug (gcs_fc_t* fc, long debug_level); #endif /* _gcs_fc_h_ */ galera-4-26.4.25/gcs/src/gcs_act_cchange.cpp000644 000164 177776 00000016400 15107057155 021637 0ustar00jenkinsnogroup000000 000000 // // Copyright (C) 2015-2017 Codership Oy // #include "gcs.hpp" #include "gu_digest.hpp" #include "gu_hexdump.hpp" #include "gu_uuid.hpp" #include "gu_macros.hpp" #include "gu_logger.hpp" #include "gu_limits.h" #include #include #include gcs_act_cchange::gcs_act_cchange() : memb (), uuid (GU_UUID_NIL), seqno (GCS_SEQNO_ILL), conf_id (-1), vote_seqno (GCS_SEQNO_ILL), vote_res (0), repl_proto_ver(-1), appl_proto_ver(-1) {} enum Version { VER0 = 0 }; static Version _version(int ver) { switch(ver) { case VER0: return VER0; default: assert(0); gu_throw_error(EPROTO) << "Unsupported CC action version"; throw; } } // sufficiently big array to cover all potential checksum sizes typedef char checksum_t[16]; static inline int _checksum_len(Version const ver) { int ret(0); switch(ver) { case VER0: ret = 8; break; default: assert(0); } assert(ret < int(sizeof(checksum_t))); return ret; } static void _checksum(Version const ver, const void* const buf, size_t const size, checksum_t& res) { switch(ver) { case VER0: gu::FastHash::digest(buf, size, res); return; default: assert(0); } } static inline gcs_node_state_t _int_to_node_state(int const s) { if (gu_unlikely(s < 0 || s >= GCS_NODE_STATE_MAX)) { assert(0); gu_throw_error(EINVAL) << "No such node state: " << s; } return gcs_node_state_t(s); } gcs_act_cchange::gcs_act_cchange(const void* const cc_buf, int const cc_size) : memb (), uuid (), seqno (), conf_id (), vote_seqno (), vote_res (), repl_proto_ver(), appl_proto_ver() { const char* b(static_cast(cc_buf)); Version const cc_ver(_version(b[0])); int const check_len(_checksum_len(cc_ver)); int const check_offset(cc_size - check_len); checksum_t check; _checksum(cc_ver, cc_buf, check_offset, check); if (gu_unlikely(!std::equal(b + check_offset, b + cc_size, 
check))) { std::vector debug(check_offset); std::copy(b + 1, b + check_offset, debug.begin()); debug[check_offset - 1] = '\0'; gu_throw_error(EINVAL) << "CC action checksum mismatch. Found " << gu::Hexdump(b + check_offset, check_len) << " at offset " << check_offset << ", computed " << gu::Hexdump(check, sizeof(check)) << ", action contents: '" << debug.data() << "'"; } b += 1; // skip version byte int const str_len(::strlen(b)); std::string const ist(b, str_len); std::istringstream is(ist); char c; int msg_ver; int memb_num; is >> msg_ver >> c >> repl_proto_ver >> c >> appl_proto_ver >> c >> uuid >> c >> seqno >> c >> conf_id >> c >> vote_seqno >> c >> vote_res >> c >> memb_num; assert(cc_ver == msg_ver); b += str_len + 1; // memb array offset memb.reserve(memb_num); for (int i(0); i < memb_num; ++i) { gcs_act_cchange::member m; size_t id_len(::strlen(b)); gu_uuid_scan(b, id_len, &m.uuid_); b += id_len + 1; m.name_ = b; b += m.name_.length() + 1; m.incoming_ = b; b += m.incoming_.length() + 1; b += gu::unserialize8(b, 0, m.cached_); m.state_ = _int_to_node_state(b[0]); ++b; memb.push_back(m); } assert(b - static_cast(cc_buf) <= check_offset); } static size_t _memb_size(const std::vector& m) { size_t ret(0); for (size_t i(0); i < m.size(); ++i) { ret += GU_UUID_STR_LEN + 1; ret += m[i].name_.length() + 1; ret += m[i].incoming_.length() + 1; ret += sizeof(gcs_seqno_t); // lowest cached ret += sizeof(char); // state } return ret; } static size_t _strcopy(const std::string& str, char* ptr) { std::copy(str.begin(), str.end(), ptr); return str.length(); } int gcs_act_cchange::write(void** buf) const { Version const cc_ver(VER0); std::ostringstream os; os << cc_ver << ',' << repl_proto_ver << ',' << appl_proto_ver << ',' << uuid << ':' << seqno << ',' << conf_id << ',' << vote_seqno << ',' << vote_res << ',' << memb.size(); std::string const str(os.str()); int const payload_len(1 + str.length() + 1 + _memb_size(memb)); int const check_len(_checksum_len(cc_ver)); // 
checksum length // total message length, with necessary padding for alignment int const ret(GU_ALIGN(payload_len + check_len, GU_MIN_ALIGNMENT)); /* using malloc() for C compatibility */ *buf = ::malloc(ret); if (NULL == *buf) { gu_throw_error(ENOMEM) << "Failed to allocate " << ret << " bytes for configuration change event."; } ::memset(*buf, 0, ret); // initialize char* b(static_cast(*buf)); assert(cc_ver < std::numeric_limits::max()); b[0] = cc_ver; ++b; b += _strcopy(str, b); b[0] = '\0'; ++b; for (size_t i(0); i < memb.size(); ++i) { const gcs_act_cchange::member& m(memb[i]); b += gu_uuid_print(&m.uuid_, b, GU_UUID_STR_LEN+1); b[0] = '\0'; ++b; b += _strcopy(m.name_, b); b[0] = '\0'; ++b; b += _strcopy(m.incoming_, b); b[0] = '\0'; ++b; b += gu::serialize8(m.cached_, b, 0); b[0] = m.state_; ++b; } int const check_offset(ret - check_len); // writing checksum to the end assert(gu::ptr_offset(*buf, check_offset) >= b); b = static_cast(gu::ptr_offset(*buf, check_offset)); checksum_t check; _checksum(cc_ver, *buf, check_offset, check); log_debug << "Writing down CC checksum: " << gu::Hexdump(check, sizeof(check)) << " at offset " << check_offset; std::copy(check, check + check_len, b); b += check_len; assert(gu::ptr_offset(*buf, ret) == b); return ret; } bool gcs_act_cchange::member::operator==(const gcs_act_cchange::member& other) const { return ( uuid_ == other.uuid_ && name_ == other.name_ && incoming_ == other.incoming_ && cached_ == other.cached_ && state_ == other.state_ ); } bool gcs_act_cchange::operator==(const gcs_act_cchange& other) const { return ( repl_proto_ver == other.repl_proto_ver && appl_proto_ver == other.appl_proto_ver && uuid == other.uuid && seqno == other.seqno && conf_id == other.conf_id && memb == other.memb ); } std::ostream& operator <<(std::ostream& os, const struct gcs_act_cchange& cc) { os << "Version(repl,appl): " << cc.repl_proto_ver << ',' << cc.appl_proto_ver << '\n' << "GTID: " << cc.uuid << ':' << cc.seqno << ", " << "conf ID: " 
<< cc.conf_id << '\n' << "Vote(seqno:res): " << cc.vote_seqno << ':' << cc.vote_res << '\n' << "Members #: " << cc.memb.size(); return os; } galera-4-26.4.25/gcs/src/gcs_msg_type.hpp000644 000164 177776 00000002610 15107057155 021252 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ /* * Message types. */ #ifndef _gcs_msg_type_h_ #define _gcs_msg_type_h_ // NOTE! When changing this enumaration, make sure to change // gcs_msg_type_string[] in gcs_msg_type.c typedef enum gcs_msg_type { GCS_MSG_ERROR, // error happened when recv() GCS_MSG_ACTION, // action fragment GCS_MSG_LAST, // report about last applied action GCS_MSG_COMPONENT, // new component GCS_MSG_STATE_UUID,// state exchange UUID message GCS_MSG_STATE_MSG, // state exchange message GCS_MSG_JOIN, // massage saying that the node completed state transfer GCS_MSG_SYNC, // message saying that the node has synced with group GCS_MSG_FLOW, // flow control message GCS_MSG_VOTE, // vote message GCS_MSG_CAUSAL, // causality token GCS_MSG_MAX } gcs_msg_type_t; extern const char* gcs_msg_type_string[GCS_MSG_MAX]; /* Types of private actions - should not care, * must be defined and used by the application */ /* Types of regular configuration mesages (both PRIM/NON_PRIM) */ typedef enum gcs_reg_type { GCS_REG_JOIN, // caused by member JOIN GCS_REG_LEAVE, // caused by member LEAVE GCS_REG_DISCONNECT, // caused by member DISCONNECT GCS_REG_NETWORK // caused by NETWORK failure? } gcs_reg_type_t; #endif // _gcs_message_h_ galera-4-26.4.25/gcs/src/gcs_sm.cpp000644 000164 177776 00000015053 15107057155 020042 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010 Codership Oy * * $Id$ */ /*! * @file GCS Send Monitor. 
To ensure fair (FIFO) access to gcs_core_send() */ #include "gcs_sm.hpp" #include static void sm_init_stats (gcs_sm_stats_t* stats) { stats->sample_start = gu_time_monotonic(); stats->pause_start = 0; stats->paused_ns = 0; stats->paused_sample = 0; stats->send_q_samples = 0; stats->send_q_len = 0; stats->send_q_len_max = 0; stats->send_q_len_min = 0; } gcs_sm_t* gcs_sm_create (long len, long n) { if ((len < 2 /* 2 is minimum */) || (len & (len - 1))) { gu_error ("Monitor length parameter is not a power of 2: %ld", len); return NULL; } if (n < 1) { gu_error ("Invalid monitor concurrency parameter: %ld", n); return NULL; } size_t sm_size = sizeof(gcs_sm_t) + len * sizeof(((gcs_sm_t*)(0))->wait_q[0]); gcs_sm_t* sm = static_cast(gu_malloc(sm_size)); if (sm) { sm_init_stats (&sm->stats); gu_mutex_init (&sm->lock, NULL); gu_cond_init (&sm->cond, NULL); sm->cond_wait = 0; sm->wait_q_len = len; sm->wait_q_mask = sm->wait_q_len - 1; sm->wait_q_head = 1; sm->wait_q_tail = 0; sm->users = 0; sm->users_max = 0; sm->users_min = 0; sm->entered = 0; sm->ret = 0; #ifdef GCS_SM_CONCURRENCY sm->cc = n; // concurrency param. 
#endif /* GCS_SM_CONCURRENCY */ sm->pause = false; sm->wait_time = gu::datetime::Sec; #ifdef GCS_SM_DEBUG memset (&sm->history, 0, sizeof(sm->history)); sm->history_line = GCS_SM_HIST_LEN - 1; // point to the last line #endif memset (sm->wait_q, 0, sm->wait_q_len * sizeof(sm->wait_q[0])); } return sm; } long gcs_sm_close (gcs_sm_t* sm) { gu_info ("Closing send monitor..."); if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); sm->ret = -EBADFD; if (sm->pause) _gcs_sm_continue_common (sm); gu_cond_t cond; gu_cond_init (&cond, NULL); // in case the queue is full while (sm->users >= (long)sm->wait_q_len) { gu_mutex_unlock (&sm->lock); usleep(1000); gu_mutex_lock (&sm->lock); } while (sm->users > 0) { // wait for cleared queue sm->users++; GCS_SM_INCREMENT(sm->wait_q_tail); _gcs_sm_enqueue_common (sm, &cond, true, sm->wait_q_tail); sm->users--; GCS_SM_INCREMENT(sm->wait_q_head); } gu_cond_destroy (&cond); gu_mutex_unlock (&sm->lock); gu_info ("Closed send monitor."); return 0; } long gcs_sm_open (gcs_sm_t* sm) { long ret = -1; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); if (-EBADFD == sm->ret) /* closed */ { sm->ret = 0; } ret = sm->ret; gu_mutex_unlock (&sm->lock); if (ret) { gu_error ("Can't open send monitor: wrong state %ld", ret); } return ret; } void gcs_sm_destroy (gcs_sm_t* sm) { gu_mutex_destroy(&sm->lock); gu_cond_destroy(&sm->cond); gu_free (sm); } void gcs_sm_stats_get (gcs_sm_t* sm, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg, long long* paused_ns, double* paused_avg) { gcs_sm_stats_t tmp; long long now; bool paused; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); *q_len_max = sm->users_max; *q_len_min = sm->users_min; *q_len = sm->users; tmp = sm->stats; now = gu_time_monotonic(); paused = sm->pause; gu_mutex_unlock (&sm->lock); if (paused) { // taking sample in a middle of a pause tmp.paused_ns += now - tmp.pause_start; } *paused_ns = tmp.paused_ns; if (gu_likely(tmp.paused_ns >= 0)) { *paused_avg = ((double)(tmp.paused_ns 
- tmp.paused_sample)) / (now - tmp.sample_start); } else { *paused_avg = -1.0; } if (gu_likely(tmp.send_q_len >= 0 && tmp.send_q_samples >= 0)){ if (gu_likely(tmp.send_q_samples > 0)) { *q_len_avg = ((double)tmp.send_q_len) / tmp.send_q_samples; } else { *q_len_avg = 0.0; } } else { *q_len_avg = -1.0; } } void gcs_sm_stats_flush(gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); long long const now = gu_time_monotonic(); sm->stats.sample_start = now; sm->stats.paused_sample = sm->stats.paused_ns; if (sm->pause) // append elapsed pause time { sm->stats.paused_sample += now - sm->stats.pause_start; } sm->stats.send_q_len = 0; sm->stats.send_q_len_max = 0; sm->stats.send_q_len_min = 0; sm->stats.send_q_samples = 0; sm->users_max = sm->users; sm->users_min = sm->users; gu_mutex_unlock (&sm->lock); } #ifdef GCS_SM_DEBUG void _gcs_sm_dump_state_common(gcs_sm_t* sm, FILE* file) { fprintf( file, "\nSend monitor state:" "\n===================" "\n\twait_q_len: %lu" "\n\twait_q_head: %lu" "\n\twait_q_tail: %lu" "\n\tusers: %ld" "\n\tentered: %ld" "\n\tpaused: %s" "\n\tstatus: %ld\n", sm->wait_q_len, sm->wait_q_head, sm->wait_q_tail, sm->users, sm->entered, sm->pause ? "yes" : "no", sm->ret ); fprintf( file, "\nSend monitor queue:" "\n===================\n" ); unsigned long const pad(32); unsigned long const q_start(sm->wait_q_head >= pad ? 
sm->wait_q_head - pad : sm->wait_q_len + sm->wait_q_head - pad); unsigned long const q_end ((sm->wait_q_tail + pad) % sm->wait_q_len); for (unsigned long i(q_start); i != q_end; GCS_SM_INCREMENT(i)) { fprintf(file, "%5lu, %d\t", i, sm->wait_q[i].wait); } fprintf( file, "\n\nSend monitor history:" "\n=====================\n" ); int line(sm->history_line); do { line = (line + 1) % GCS_SM_HIST_LEN; fputs(sm->history[line], file); } while (line != sm->history_line); fputs("-----------------------------\n", file); } void gcs_sm_dump_state(gcs_sm_t* sm, FILE* file) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); _gcs_sm_dump_state_common(sm, file); gu_mutex_unlock (&sm->lock); } #endif /* GCS_SM_DEBUG */ galera-4-26.4.25/gcs/src/gcs_state_msg.cpp000644 000164 177776 00000104233 15107057155 021410 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2024 Codership Oy * * $Id$ */ /* * Interface to state messages - implementation * */ #define __STDC_LIMIT_MACROS #include #include #include #include #include #define GCS_STATE_MSG_VER 6 #define GCS_STATE_MSG_NO_PROTO_DOWNGRADE_VER 6 #define GCS_STATE_MSG_ACCESS #include "gcs_state_msg.hpp" #include "gcs_node.hpp" #include "gu_logger.hpp" gcs_state_msg_t* gcs_state_msg_create (const gu_uuid_t* state_uuid, const gu_uuid_t* group_uuid, const gu_uuid_t* prim_uuid, gcs_seqno_t prim_seqno, gcs_seqno_t received, gcs_seqno_t cached, gcs_seqno_t last_applied, gcs_seqno_t vote_seqno, int64_t vote_res, uint8_t vote_policy, int prim_joined, gcs_node_state_t prim_state, gcs_node_state_t current_state, const char* name, const char* inc_addr, int gcs_proto_ver, /* max supported versions*/ int repl_proto_ver, int appl_proto_ver, int prim_gcs_ver, /* last prim versions*/ int prim_repl_ver, int prim_appl_ver, int desync_count, uint8_t flags) { #define CHECK_PROTO_RANGE(LEVEL) \ if (LEVEL < (int)0 || LEVEL > (int)UINT8_MAX) { \ gu_error(#LEVEL " value %d is out of range [0, %d]", LEVEL,UINT8_MAX); \ return NULL; \ } 
CHECK_PROTO_RANGE(gcs_proto_ver); CHECK_PROTO_RANGE(repl_proto_ver); CHECK_PROTO_RANGE(appl_proto_ver); CHECK_PROTO_RANGE(prim_gcs_ver); CHECK_PROTO_RANGE(prim_repl_ver); CHECK_PROTO_RANGE(prim_appl_ver); size_t name_len = strlen(name) + 1; size_t addr_len = strlen(inc_addr) + 1; gcs_state_msg_t* ret = static_cast( gu_calloc (1, sizeof (gcs_state_msg_t) + name_len + addr_len)); if (ret) { ret->state_uuid = *state_uuid; ret->group_uuid = *group_uuid; ret->prim_uuid = *prim_uuid; ret->prim_joined = prim_joined; ret->prim_seqno = prim_seqno; ret->received = received; ret->cached = cached; ret->last_applied = last_applied; ret->vote_seqno = vote_seqno; ret->vote_res = vote_res; ret->vote_policy = vote_policy; ret->prim_state = prim_state; ret->current_state = current_state; ret->version = GCS_STATE_MSG_VER; ret->gcs_proto_ver = gcs_proto_ver; ret->repl_proto_ver= repl_proto_ver; ret->appl_proto_ver= appl_proto_ver; ret->prim_gcs_ver = prim_gcs_ver; ret->prim_repl_ver = prim_repl_ver; ret->prim_appl_ver = prim_appl_ver; ret->desync_count = desync_count; ret->name = (char*)(ret + 1); ret->inc_addr = ret->name + name_len; ret->flags = flags; // tmp is a workaround for some combination of GCC flags which don't // allow passing ret->name and ret->inc_addr directly even with casting // char* tmp = (char*)ret->name; strcpy ((char*)ret->name, name); // tmp = (char*)ret->inc_addr; strcpy ((char*)ret->inc_addr, inc_addr); } return ret; } void gcs_state_msg_destroy (gcs_state_msg_t* state) { gu_free (state); } /* Returns length needed to serialize gcs_state_msg_t for sending */ size_t gcs_state_msg_len (gcs_state_msg_t* state) { return ( sizeof (int8_t) + // version (reserved) sizeof (int8_t) + // flags sizeof (int8_t) + // gcs_proto_ver sizeof (int8_t) + // repl_proto_ver sizeof (int8_t) + // prim_state sizeof (int8_t) + // curr_state sizeof (int16_t) + // prim_joined sizeof (gu_uuid_t) + // state_uuid sizeof (gu_uuid_t) + // group_uuid sizeof (gu_uuid_t) + // conf_uuid sizeof 
(int64_t) + // received sizeof (int64_t) + // prim_seqno strlen (state->name) + 1 + strlen (state->inc_addr) + 1 + // V1-2 stuff sizeof (uint8_t) + // appl_proto_ver (in preparation for V1) // V3 stuff sizeof (int64_t) + // cached // V4 stuff sizeof (int32_t) + // desync count // V5 stuff sizeof (int64_t) + // last_applied sizeof (int64_t) + // vote_seqno sizeof (int64_t) + // vote_res sizeof (uint8_t) + // vote_policy // V6 stuff sizeof (int8_t) + // prim_gcs_ver sizeof (int8_t) + // prim_repl_ver sizeof (int8_t) + // prim_appl_ver 0 ); } #define STATE_MSG_FIELDS_V0(buf) \ int8_t* version = (int8_t*)buf; \ int8_t* flags = version + 1; \ int8_t* gcs_proto_ver = flags + 1; \ int8_t* repl_proto_ver = gcs_proto_ver + 1; \ int8_t* prim_state = repl_proto_ver + 1; \ int8_t* curr_state = prim_state + 1; \ int16_t* prim_joined = (int16_t*)(curr_state + 1); \ gu_uuid_t* state_uuid = (gu_uuid_t*)(prim_joined + 1); \ gu_uuid_t* group_uuid = state_uuid + 1; \ gu_uuid_t* prim_uuid = group_uuid + 1; \ int64_t* received = (int64_t*)(prim_uuid + 1); \ int64_t* prim_seqno = received + 1; \ char* name = (char*)(prim_seqno + 1); #define CONST_STATE_MSG_FIELDS_V0(buf) \ const int8_t* version = (int8_t*)buf; \ const int8_t* flags = version + 1; \ const int8_t* gcs_proto_ver = flags + 1; \ const int8_t* repl_proto_ver = gcs_proto_ver + 1; \ const int8_t* prim_state = repl_proto_ver + 1; \ const int8_t* curr_state = prim_state + 1; \ const int16_t* prim_joined = (int16_t*)(curr_state + 1); \ const gu_uuid_t* state_uuid = (gu_uuid_t*)(prim_joined + 1); \ const gu_uuid_t* group_uuid = state_uuid + 1; \ const gu_uuid_t* prim_uuid = group_uuid + 1; \ const int64_t* received = (int64_t*)(prim_uuid + 1); \ const int64_t* prim_seqno = received + 1; \ const char* name = (char*)(prim_seqno + 1); /* Serialize gcs_state_msg_t into buf */ ssize_t gcs_state_msg_write (void* buf, const gcs_state_msg_t* state) { STATE_MSG_FIELDS_V0(buf); char* inc_addr = name + strlen (state->name) + 1; uint8_t* 
appl_proto_ver = (uint8_t*)(inc_addr + strlen(state->inc_addr) +1); // V3 stuff int64_t* cached = (int64_t*)(appl_proto_ver + 1); // V4 stuff int32_t* desync_count = (int32_t*)(cached + 1); // V5 stuff int64_t* last_applied = (int64_t*)(desync_count + 1); int64_t* vote_seqno = last_applied + 1; int64_t* vote_res = vote_seqno + 1; uint8_t* vote_policy = (uint8_t*)(vote_res + 1); // V6 stuff uint8_t* prim_gcs_ver = (uint8_t*)(vote_policy + 1); uint8_t* prim_repl_ver = (uint8_t*)(prim_gcs_ver + 1); uint8_t* prim_appl_ver = (uint8_t*)(prim_repl_ver + 1); *version = GCS_STATE_MSG_VER; *flags = state->flags; *gcs_proto_ver = state->gcs_proto_ver; *repl_proto_ver = state->repl_proto_ver; *prim_state = state->prim_state; *curr_state = state->current_state; *prim_joined = htog16(((int16_t)state->prim_joined)); *state_uuid = state->state_uuid; *group_uuid = state->group_uuid; *prim_uuid = state->prim_uuid; *received = htog64(state->received); *prim_seqno = htog64(state->prim_seqno); /* from this point alignment breaks */ strcpy (name, state->name); strcpy (inc_addr, state->inc_addr); *appl_proto_ver = state->appl_proto_ver; // in preparation for V1 gu::serialize8(state->cached, cached, 0); gu::serialize4(state->desync_count, desync_count, 0); gu::serialize8(state->last_applied, last_applied, 0); gu::serialize8(state->vote_seqno, vote_seqno, 0); // 4.ee gu::serialize8(state->vote_res, vote_res, 0); gu::serialize1(state->vote_policy, vote_policy, 0); *prim_gcs_ver = state->prim_gcs_ver; *prim_repl_ver = state->prim_repl_ver; *prim_appl_ver = state->prim_appl_ver; size_t const msg_len((uint8_t*)(prim_appl_ver + 1) - (uint8_t*)buf); #ifndef NDEBUG char str[1024]; gu_hexdump(buf, msg_len, str, sizeof(str), true); gu_debug("Serialized state message of size %zd\n%s", msg_len, str); #endif /* NDEBUG */ return msg_len; } /* De-serialize gcs_state_msg_t from buf */ gcs_state_msg_t* gcs_state_msg_read (const void* const buf, ssize_t const buf_len) { assert (buf_len > 0); #ifndef NDEBUG 
char str[1024]; gu_hexdump(buf, buf_len, str, sizeof(str), true); gu_debug("Received state message of size %zd\n%s", buf_len, str); #endif /* NDEBUG*/ /* beginning of the message is always version 0 */ CONST_STATE_MSG_FIELDS_V0(buf); const char* inc_addr = name + strlen (name) + 1; int appl_proto_ver = 0; uint8_t* appl_ptr = (uint8_t*)(inc_addr + strlen(inc_addr) + 1); if (*version >= 1) { assert(buf_len >= (uint8_t*)(appl_ptr + 1) - (uint8_t*)buf); appl_proto_ver = *appl_ptr; } int64_t cached = GCS_SEQNO_ILL; int64_t* cached_ptr = (int64_t*)(appl_ptr + 1); if (*version >= 3) { assert(buf_len >= (uint8_t*)(cached_ptr + 1) - (uint8_t*)buf); gu::unserialize8(cached_ptr, 0, cached); } // v4 stuff int32_t desync_count = 0; int32_t* desync_count_ptr = (int32_t*)(cached_ptr + 1); if (*version >= 4) { assert(buf_len >= (uint8_t*)(desync_count_ptr + 1) - (uint8_t*)buf); gu::unserialize4(desync_count_ptr, 0, desync_count); } // v5 stuff int64_t last_applied = 0; int64_t vote_seqno = 0; int64_t vote_res = 0; uint8_t vote_policy = GCS_VOTE_ZERO_WINS; // backward compatibility int64_t* last_applied_ptr = (int64_t*)(desync_count_ptr + 1); if (*version >= 5 && *gcs_proto_ver >= 2) { assert(buf_len > (uint8_t*)(last_applied_ptr + 3) - (uint8_t*)buf); gu::unserialize8(last_applied_ptr, 0, last_applied); gu::unserialize8(last_applied_ptr + 1, 0, vote_seqno); gu::unserialize8(last_applied_ptr + 2, 0, vote_res); gu::unserialize1(last_applied_ptr + 3, 0, vote_policy); } // v6 stuff uint8_t prim_gcs_ver = 0; uint8_t* prim_gcs_ptr = (uint8_t*)(last_applied_ptr + 3) + 1; uint8_t prim_repl_ver = 0; uint8_t* prim_repl_ptr = (uint8_t*)(prim_gcs_ptr + 1); uint8_t prim_appl_ver = 0; uint8_t* prim_appl_ptr = (uint8_t*)(prim_repl_ptr + 1); if (*version >= 6) { assert(buf_len >= (uint8_t*)(prim_appl_ptr + 1) - (uint8_t*)buf); prim_gcs_ver = *prim_gcs_ptr; prim_repl_ver = *prim_repl_ptr; prim_appl_ver = *prim_appl_ptr; } gcs_state_msg_t* ret = gcs_state_msg_create ( state_uuid, group_uuid, 
prim_uuid, gtoh64(*prim_seqno), gtoh64(*received), cached, last_applied, vote_seqno, vote_res, vote_policy, gtoh16(*prim_joined), (gcs_node_state_t)*prim_state, (gcs_node_state_t)*curr_state, name, inc_addr, *gcs_proto_ver, *repl_proto_ver, appl_proto_ver, prim_gcs_ver, prim_repl_ver, prim_appl_ver, desync_count, *flags ); if (ret) ret->version = *version; // dirty hack return ret; } /* Print state message contents to buffer */ int gcs_state_msg_snprintf (char* str, size_t size, const gcs_state_msg_t* state) { str[size - 1] = '\0'; // preventive termination return snprintf (str, size - 1, "\n\tVersion : %d" "\n\tFlags : %#02hhx" "\n\tProtocols : %d / %d / %d" "\n\tState : %s" "\n\tDesync count : %d" "\n\tPrim state : %s" "\n\tPrim UUID : " GU_UUID_FORMAT "\n\tPrim seqno : %lld" "\n\tFirst seqno : %lld" "\n\tLast seqno : %lld" "\n\tCommit cut : %lld" "\n\tLast vote : %lld.%0llx" "\n\tVote policy : %d" "\n\tPrim JOINED : %d" "\n\tState UUID : " GU_UUID_FORMAT "\n\tGroup UUID : " GU_UUID_FORMAT "\n\tName : '%s'" "\n\tIncoming addr: '%s'\n", state->version, state->flags, state->gcs_proto_ver, state->repl_proto_ver, state->appl_proto_ver, gcs_node_state_to_str(state->current_state), state->desync_count, gcs_node_state_to_str(state->prim_state), GU_UUID_ARGS(&state->prim_uuid), (long long)state->prim_seqno, (long long)state->cached, (long long)state->received, (long long)state->last_applied, (long long)state->vote_seqno,(long long)state->vote_res, state->vote_policy, state->prim_joined, GU_UUID_ARGS(&state->state_uuid), GU_UUID_ARGS(&state->group_uuid), state->name, state->inc_addr ); } /* Get state uuid */ const gu_uuid_t* gcs_state_msg_uuid (const gcs_state_msg_t* state) { return &state->state_uuid; } /* Get group uuid */ const gu_uuid_t* gcs_state_msg_group_uuid (const gcs_state_msg_t* state) { return &state->group_uuid; } /* Get action seqno */ gcs_seqno_t gcs_state_msg_received (const gcs_state_msg_t* state) { return state->received; } /* Get first cached action 
seqno */ gcs_seqno_t gcs_state_msg_cached (const gcs_state_msg_t* state) { return state->cached; } /* Get last applied action seqno */ gcs_seqno_t gcs_state_msg_last_applied (const gcs_state_msg_t* state) { return state->last_applied; } /* Get last applied action vote */ void gcs_state_msg_last_vote (const gcs_state_msg_t* state, gcs_seqno_t& seqno, int64_t& res) { seqno = state->vote_seqno; res = state->vote_res; } uint8_t gcs_state_msg_vote_policy (const gcs_state_msg_t* state) { return state->vote_policy; } /* Get current node state */ gcs_node_state_t gcs_state_msg_current_state (const gcs_state_msg_t* state) { return state->current_state; } /* Get node state */ gcs_node_state_t gcs_state_msg_prim_state (const gcs_state_msg_t* state) { return state->prim_state; } /* Get node name */ const char* gcs_state_msg_name (const gcs_state_msg_t* state) { return state->name; } /* Get node incoming address */ const char* gcs_state_msg_inc_addr (const gcs_state_msg_t* state) { return state->inc_addr; } /* Get supported protocols */ void gcs_state_msg_get_proto_ver (const gcs_state_msg_t* state, int* gcs_proto_ver, int* repl_proto_ver, int* appl_proto_ver) { *gcs_proto_ver = state->gcs_proto_ver; *repl_proto_ver = state->repl_proto_ver; *appl_proto_ver = state->appl_proto_ver; } int gcs_state_msg_get_desync_count (const gcs_state_msg_t* state) { return state->desync_count; } /* Get state message flags */ uint8_t gcs_state_msg_flags (const gcs_state_msg_t* state) { return state->flags; } /* Returns the node which is most representative of a group */ static const gcs_state_msg_t* state_nodes_compare (const gcs_state_msg_t* left, const gcs_state_msg_t* right) { assert (0 == gu_uuid_compare(&left->group_uuid, &right->group_uuid)); /* Allow GCS_SEQNO_ILL seqnos if bootstrapping from non-prim */ assert ((gcs_state_msg_flags(left) & GCS_STATE_FBOOTSTRAP) || left->prim_seqno != GCS_SEQNO_ILL); assert ((gcs_state_msg_flags(right) & GCS_STATE_FBOOTSTRAP) || right->prim_seqno != 
GCS_SEQNO_ILL); if (left->received < right->received) { assert (left->prim_seqno <= right->prim_seqno); return right; } else if (left->received > right->received) { assert (left->prim_seqno >= right->prim_seqno); return left; } else { // act_id's are equal, choose the one with higher prim_seqno. if (left->prim_seqno < right->prim_seqno) { return right; } else { return left; } } } /* Helper - just prints out all significant (JOINED) nodes */ static void state_report_uuids (char* buf, size_t buf_len, const gcs_state_msg_t* states[], long states_num, gcs_node_state_t min_state) { long j; for (j = 0; j < states_num; j++) { if (states[j]->current_state >= min_state) { int written = gcs_state_msg_snprintf (buf, buf_len, states[j]); buf += written; buf_len -= written; } } } #define GCS_STATE_MAX_LEN 722 #define GCS_STATE_BAD_REP ((gcs_state_msg_t*)-1) /*! checks for inherited primary configuration, returns representative * @retval (void*)-1 in case of fatal error */ static const gcs_state_msg_t* state_quorum_inherit (const gcs_state_msg_t* states[], size_t states_num, gcs_state_quorum_t* quorum) { /* They all must have the same group_uuid or otherwise quorum is impossible. * Of those we need to find at least one that has complete state - * status >= GCS_STATE_JOINED. If we find none - configuration is * non-primary. * Of those with the status >= GCS_STATE_JOINED we choose the most * representative: with the highest act_seqno and prim_seqno. 
*/ size_t i, j; const gcs_state_msg_t* rep = NULL; // find at least one JOINED/DONOR (donor was once joined) for (i = 0; i < states_num; i++) { if (gcs_node_is_joined(states[i]->current_state)) { rep = states[i]; break; } } if (!rep) { size_t buf_len = states_num * GCS_STATE_MAX_LEN; char* buf = static_cast(gu_malloc (buf_len)); if (buf) { state_report_uuids (buf, buf_len, states, states_num, GCS_NODE_STATE_NON_PRIM); #ifdef GCS_CORE_TESTING gu_info ("Quorum: No node with complete state:\n%s", buf); #else /* Print buf into stderr in order to message truncation * of application logger. */ gu_info ("Quorum: No node with complete state:"); fprintf(stderr, "%s\n", buf); #endif /* GCS_CORE_TESTING */ gu_free (buf); } return NULL; } // Check that all JOINED/DONOR have the same group UUID // and find most updated for (j = i + 1; j < states_num; j++) { if (gcs_node_is_joined(states[j]->current_state)) { if (gu_uuid_compare (&rep->group_uuid, &states[j]->group_uuid)) { // for now just freak out and print all conflicting nodes size_t buf_len = states_num * GCS_STATE_MAX_LEN; char* buf = static_cast(gu_malloc (buf_len)); if (buf) { state_report_uuids (buf, buf_len, states, states_num, GCS_NODE_STATE_DONOR); gu_fatal("Quorum impossible: conflicting group UUIDs:\n%s", buf); gu_free (buf); } else { gu_fatal("Quorum impossible: conflicting group UUIDs"); } return GCS_STATE_BAD_REP; } rep = state_nodes_compare (rep, states[j]); } } quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->last_applied = rep->last_applied; quorum->group_uuid = rep->group_uuid; quorum->primary = true; return rep; } struct candidate /* remerge candidate */ { gu_uuid_t prim_uuid; // V0 compatibility (0.8.1) gu_uuid_t state_uuid; gcs_seqno_t state_seqno; const gcs_state_msg_t* rep; int prim_joined; int found; gcs_seqno_t prim_seqno; }; static bool state_match_candidate (const gcs_state_msg_t* const s, struct candidate* const c, int const state_exchange_version) { switch 
(state_exchange_version) { case 0: // V0 compatibility (0.8.1) return (0 == gu_uuid_compare(&s->prim_uuid, &c->prim_uuid)); default: return ((0 == gu_uuid_compare(&s->group_uuid, &c->state_uuid)) && (s->received == c->state_seqno) && // what if they are different components. // but have same group uuid and received(0) // see gh24. (s->prim_seqno == c->prim_seqno)); } } /* try to find representative remerge candidate */ static const struct candidate* state_rep_candidate (const struct candidate* const c, int const c_num) { assert (c_num > 0); const struct candidate* rep = &c[0]; gu_uuid_t const state_uuid = rep->state_uuid; gcs_seqno_t state_seqno = rep->state_seqno; gcs_seqno_t prim_seqno = rep->prim_seqno; int i; for (i = 1; i < c_num; i++) { if (!gu_uuid_compare(&c[i].state_uuid, &GU_UUID_NIL)) { /* Ignore nodes with undefined state uuid, they have been * added to group before remerge and have clean state. */ continue; } else if (gu_uuid_compare(&state_uuid, &GU_UUID_NIL) && gu_uuid_compare(&state_uuid, &c[i].state_uuid)) { /* There are candidates from different groups */ return NULL; } assert (prim_seqno != c[i].prim_seqno || state_seqno != c[i].state_seqno); if (prim_seqno < c[i].prim_seqno) { rep = &c[i]; prim_seqno = rep->prim_seqno; } else if (prim_seqno == c[i].prim_seqno && state_seqno < c[i].state_seqno) { rep = &c[i]; state_seqno = rep->state_seqno; } } return rep; } /*! checks for full prim remerge after non-prim */ static const gcs_state_msg_t* state_quorum_remerge (const gcs_state_msg_t* const states[], long const states_num, bool const bootstrap, gcs_state_quorum_t* const quorum) { struct candidate* candidates = GU_CALLOC(states_num, struct candidate); if (!candidates) { gu_error ("Quorum: could not allocate %lu bytes for re-merge check.", states_num * sizeof(struct candidate)); return NULL; } int i, j; int candidates_found = 0; /* 1. 
Sort and count all nodes who have ever been JOINED by primary * component UUID */ for (i = 0; i < states_num; i++) { bool cond; if (bootstrap) { cond = gcs_state_msg_flags(states[i]) & GCS_STATE_FBOOTSTRAP; if (cond) gu_debug("found node %s with bootstrap flag", gcs_state_msg_name(states[i])); } else { cond = gcs_node_is_joined(states[i]->prim_state); } if (cond) { if (!bootstrap && GCS_NODE_STATE_JOINER == states[i]->current_state) { /* Joiner always has an undefined state * (and it should be its prim_state!) */ gu_warn ("Inconsistent state message from %d (%s): current " "state is %s, but the primary state was %s.", i, states[i]->name, gcs_node_state_to_str(states[i]->current_state), gcs_node_state_to_str(states[i]->prim_state)); continue; } assert(bootstrap || gu_uuid_compare(&states[i]->prim_uuid, &GU_UUID_NIL)); for (j = 0; j < candidates_found; j++) { if (state_match_candidate (states[i], &candidates[j], quorum->version)) { assert(states[i]->prim_joined == candidates[j].prim_joined); // comment out following two lines for pc recovery // when nodes recoveried from state files, if their states // match, so candidates[j].found > 0. // However their prim_joined == 0. 
// assert(candidates[j].found < candidates[j].prim_joined); // assert(candidates[j].found > 0); candidates[j].found++; candidates[j].rep = state_nodes_compare (candidates[j].rep, states[i]); break; } } if (j == candidates_found) { // we don't have this candidate in the list yet candidates[j].prim_uuid = states[i]->prim_uuid; candidates[j].state_uuid = states[i]->group_uuid; candidates[j].state_seqno = states[i]->received; candidates[j].prim_joined = states[i]->prim_joined; candidates[j].rep = states[i]; candidates[j].found = 1; candidates[j].prim_seqno = states[i]->prim_seqno; candidates_found++; assert(candidates_found <= states_num); } } } const gcs_state_msg_t* rep = NULL; if (candidates_found) { assert (candidates_found > 0); const struct candidate* const rc = state_rep_candidate (candidates, candidates_found); if (!rc) { gu_error ("Found more than one %s primary component candidate.", bootstrap ? "bootstrap" : "re-merged"); rep = NULL; } else { if (bootstrap) { gu_info ("Bootstrapped primary " GU_UUID_FORMAT " found: %d.", GU_UUID_ARGS(&rc->prim_uuid), rc->found); } else { gu_info ("%s re-merge of primary " GU_UUID_FORMAT " found: " "%d of %d.", rc->found == rc->prim_joined ? "Full" : "Partial", GU_UUID_ARGS(&rc->prim_uuid), rc->found, rc->prim_joined); } rep = rc->rep; assert (NULL != rep); assert (bootstrap || gcs_node_is_joined(rep->prim_state)); quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->last_applied = rep->last_applied; quorum->group_uuid = rep->group_uuid; quorum->primary = true; } } else { assert (0 == candidates_found); gu_warn ("No %s primary component found.", bootstrap ? "bootstrapped" : "re-merged"); } gu_free (candidates); return rep; } #if 0 // REMOVE WHEN NO LONGER NEEDED FOR REFERENCE /*! 
Checks for prim comp bootstrap */ static const gcs_state_msg_t* state_quorum_bootstrap (const gcs_state_msg_t* const states[], long const states_num, gcs_state_quorum_t* const quorum) { struct candidate* candidates = GU_CALLOC(states_num, struct candidate); if (!candidates) { gu_error ("Quorum: could not allocate %zd bytes for re-merge check.", states_num * sizeof(struct candidate)); return NULL; } int i, j; int candidates_found = 0; /* 1. Sort and count all nodes which have bootstrap flag set */ for (i = 0; i < states_num; i++) { if (gcs_state_msg_flags(states[i]) & GCS_STATE_FBOOTSTRAP) { gu_debug("found node %s with bootstrap flag", gcs_state_msg_name(states[i])); for (j = 0; j < candidates_found; j++) { if (state_match_candidate (states[i], &candidates[j], quorum->version)) { assert(states[i]->prim_joined == candidates[j].prim_joined); assert(candidates[j].found > 0); candidates[j].found++; candidates[j].rep = state_nodes_compare (candidates[j].rep, states[i]); break; } } if (j == candidates_found) { // we don't have this candidate in the list yet candidates[j].prim_uuid = states[i]->prim_uuid; candidates[j].state_uuid = states[i]->group_uuid; candidates[j].state_seqno = states[i]->received; candidates[j].prim_joined = states[i]->prim_joined; candidates[j].rep = states[i]; candidates[j].found = 1; candidates_found++; assert(candidates_found <= states_num); } } } const gcs_state_msg_t* rep = NULL; if (candidates_found) { assert (candidates_found > 0); const struct candidate* const rc = state_rep_candidate (candidates, candidates_found); if (!rc) { gu_error ("Found more than one bootstrap primary component " "candidate."); rep = NULL; } else { gu_info ("Bootstrapped primary " GU_UUID_FORMAT " found: %d.", GU_UUID_ARGS(&rc->prim_uuid), rc->found); rep = rc->rep; assert (NULL != rep); quorum->act_id = rep->received; quorum->conf_id = rep->prim_seqno; quorum->last_applied = rep->last_applied; quorum->group_uuid = rep->group_uuid; quorum->primary = true; } } else { 
assert (0 == candidates_found); gu_warn ("No bootstrapped primary component found."); } gu_free (candidates); return rep; } #endif // 0 /* Get quorum decision from state messages */ long gcs_state_msg_get_quorum (const gcs_state_msg_t* states[], size_t states_num, gcs_state_quorum_t* quorum) { assert (states_num > 0); assert (NULL != states); size_t i; const gcs_state_msg_t* rep = NULL; *quorum = GCS_QUORUM_NON_PRIMARY; // pessimistic assumption /* find lowest commonly supported state exchange version */ quorum->version = states[0]->version; for (i = 1; i < states_num; i++) { if (quorum->version > states[i]->version) { quorum->version = states[i]->version; } } rep = state_quorum_inherit (states, states_num, quorum); if (!quorum->primary && rep != GCS_STATE_BAD_REP) { rep = state_quorum_remerge (states, states_num, false, quorum); } if (!quorum->primary && rep != GCS_STATE_BAD_REP) { rep = state_quorum_remerge (states, states_num, true, quorum); } if (!quorum->primary) { gu_error ("Failed to establish quorum."); return 0; } assert (rep != NULL); // select the highest commonly supported protocol: min(proto_max) #define INIT_PROTO_VER(LEVEL) quorum->LEVEL = rep->LEVEL INIT_PROTO_VER(gcs_proto_ver); INIT_PROTO_VER(repl_proto_ver); INIT_PROTO_VER(appl_proto_ver); #undef INIT_PROTO_VER for (i = 0; i < states_num; i++) { #define CHECK_MIN_PROTO_VER(LEVEL) \ if (states[i]->LEVEL < quorum->LEVEL) { \ quorum->LEVEL = states[i]->LEVEL; \ } // if (!gu_uuid_compare(&states[i]->group_uuid, &quorum->group_uuid)) { CHECK_MIN_PROTO_VER(gcs_proto_ver); CHECK_MIN_PROTO_VER(repl_proto_ver); CHECK_MIN_PROTO_VER(appl_proto_ver); // } #undef CHECK_MIN_PROTO_VER } if (quorum->version >= GCS_STATE_MSG_NO_PROTO_DOWNGRADE_VER) { // forbid protocol downgrade #define CHECK_MIN_PROTO_VER(LEVEL) \ if (quorum->LEVEL##_proto_ver < rep->prim_##LEVEL##_ver) { \ quorum->LEVEL##_proto_ver = rep->prim_##LEVEL##_ver; \ } CHECK_MIN_PROTO_VER(gcs); CHECK_MIN_PROTO_VER(repl); CHECK_MIN_PROTO_VER(appl); 
#undef CHECK_MIN_PROTO_VER } if (quorum->gcs_proto_ver < 1) { quorum->vote_policy = GCS_VOTE_ZERO_WINS; } else { quorum->vote_policy = rep->vote_policy; } if (quorum->gcs_proto_ver >= 6) { if (quorum->last_applied > quorum->act_id) { // commit cut is infected from pre v6 versions, reset it quorum->last_applied = 0; } } if (quorum->version < 2) {;} // for future generations if (quorum->version < 1) { // appl_proto_ver is not supported by all members assert (quorum->repl_proto_ver <= 1); if (1 == quorum->repl_proto_ver) quorum->appl_proto_ver = 1; else quorum->appl_proto_ver = 0; } return 0; } galera-4-26.4.25/gcs/src/gcs_backend.cpp000644 000164 177776 00000004767 15107057155 021024 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /*********************************************************/ /* This unit initializes the backend given backend URI */ /*********************************************************/ #include #include #include #include #include "gcs_backend.hpp" #include "gcs_dummy.hpp" #ifdef GCS_USE_SPREAD #include "gcs_spread.h" #endif /* GCS_USE_SPREAD */ #ifdef GCS_USE_VS #include "gcs_vs.h" #endif /* GCS_USE_VS */ #ifdef GCS_USE_GCOMM #include "gcs_gcomm.hpp" #endif /* GCS_USE_GCOMM */ bool gcs_backend_register(gu_config_t* const conf) { bool ret = false; #ifdef GCS_USE_GCOMM ret |= gcs_gcomm_register(conf); #endif /* GCS_USE_GCOMM */ #ifdef GCS_USE_VS #endif /* GCS_USE_VS */ #ifdef GCS_USE_SPREAD #endif /* GCS_USE_SPREAD */ ret |= gcs_dummy_register(conf); return ret; } /* Static array describing backend ID - open() pairs */ static struct { const char* id; gcs_backend_create_t create; } const backend[] = { #ifdef GCS_USE_GCOMM { "gcomm", gcs_gcomm_create}, #endif /* GCS_USE_GCOMM */ #ifdef GCS_USE_VS { "vsbes", gcs_vs_create }, #endif /* GCS_USE_VS */ #ifdef GCS_USE_SPREAD { "spread", gcs_spread_create }, #endif /* GCS_USE_SPREAD */ { "dummy", gcs_dummy_create }, { NULL, NULL } // terminating pair }; static const 
char backend_sep[] = "://"; /* Returns true if backend matches, false otherwise */ static bool backend_type_is (const char* uri, const char* type, const size_t len) { if (len == strlen(type)) { if (!strncmp (uri, type, len)) return true; } return false; } long gcs_backend_init (gcs_backend_t* const bk, const char* const uri, gu_config_t* const conf) { const char* sep; assert (NULL != bk); assert (NULL != uri); sep = strstr (uri, backend_sep); if (NULL != sep) { size_t type_len = sep - uri; const char* addr = sep + strlen(backend_sep); long i; /* try to match any of specified backends */ for (i = 0; backend[i].id != NULL; i++) { if (backend_type_is (uri, backend[i].id, type_len)) return backend[i].create (bk, addr, conf); } /* no backends matched */ gu_error ("Backend not supported: %s", uri); return -ESOCKTNOSUPPORT; } gu_error ("Invalid backend URI: %s", uri); return -EINVAL; } galera-4-26.4.25/gcs/src/gcs_recv_msg.hpp000644 000164 177776 00000001145 15107057155 021232 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! 
* Receiving message context */ #ifndef _gcs_recv_msg_h_ #define _gcs_recv_msg_h_ #include "gcs_msg_type.hpp" typedef struct gcs_recv_msg { void* buf; int buf_len; int size; int sender_idx; gcs_msg_type_t type; gcs_recv_msg() { } gcs_recv_msg(void* b, long bl, long sz, long si, gcs_msg_type_t t) : buf(b), buf_len(bl), size(sz), sender_idx(si), type(t) { } } gcs_recv_msg_t; #endif /* _gcs_recv_msg_h_ */ galera-4-26.4.25/gcs/src/gcs_test.sh000755 000164 177776 00000001056 15107057155 020233 0ustar00jenkinsnogroup000000 000000 #!/bin/sh # # This script checks the output of the gcs_test program # to verify that all actions that were sent were received # intact # # $Id$ SEND_LOG="gcs_test_send.log" RECV_LOG="gcs_test_recv.log" echo "Sent action count: $(wc -l $SEND_LOG)" echo "Received action count: $(wc -l $RECV_LOG)" SEND_MD5=$(cat "$SEND_LOG" | awk '{ print $4 " " $5 }'| sort -n -k 2 | tee sort_send | md5sum) echo "send_log md5: $SEND_MD5" RECV_MD5=$(cat "$RECV_LOG" | awk '{ print $4 " " $5 }'| sort -n -k 2 | tee sort_recv | md5sum) echo "recv_log md5: $RECV_MD5" # galera-4-26.4.25/gcs/src/gcs_act.hpp000644 000164 177776 00000001642 15107057155 020176 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #ifndef _gcs_act_h_ #define _gcs_act_h_ #include "gcs.hpp" struct gcs_act { const void* buf; ssize_t buf_len; gcs_act_type_t type; gcs_act() : buf(NULL), buf_len(0), type(GCS_ACT_ERROR) { } gcs_act(const void* b, ssize_t bl, gcs_act_type_t t) : buf(b), buf_len(bl), type(t) { } }; struct gcs_act_rcvd { struct gcs_act act; const struct gu_buf* local; // local buffer vector if any gcs_seqno_t id; // global total order seqno int sender_idx; gcs_act_rcvd() : act(), local(NULL), id(GCS_SEQNO_ILL), sender_idx(-1) { } gcs_act_rcvd(const gcs_act& a, const struct gu_buf* loc, gcs_seqno_t i, int si) : act(a), local(loc), id(i), sender_idx(si) { } }; #endif /* _gcs_act_h_ */ galera-4-26.4.25/gcs/src/gcs.hpp000644 000164 177776 00000046101 
15107057155 017346 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2021 Codership Oy * * $Id$ */ /*! * @file gcs.c Public GCS API */ #ifndef _gcs_h_ #define _gcs_h_ #include "gcs_gcache.hpp" #include #include #include #include #include #include #include #include #include #include #include #include #include /*! @typedef @brief Sequence number type. */ typedef int64_t gcs_seqno_t; /*! @def @brief Illegal sequence number. Action not serialized. */ static const gcs_seqno_t GCS_SEQNO_ILL = -1; /*! @def @brief Empty state. No actions applied. */ static const gcs_seqno_t GCS_SEQNO_NIL = 0; /*! @def @brief Start of the sequence */ static const gcs_seqno_t GCS_SEQNO_FIRST = 1; /*! @def @brief history UUID length */ #define GCS_UUID_LEN 16 /*! @def @brief maximum supported size of an action (2GB - 1) */ #define GCS_MAX_ACT_SIZE 0x7FFFFFFF /*! Connection handle type */ typedef struct gcs_conn gcs_conn_t; /*! @brief Creates GCS connection handle. * * @param conf gu_config_t* configuration object, can be null. * @param cache pointer to the gcache object. * @param node_name human readable name of the node, can be null. * @param inc_addr address at which application accepts incoming requests. * Used for load balancing, can be null. * @param repl_proto_ver max replicator protocol version. * @param appl_proto_ver max application ptotocol version. * @return pointer to GCS connection handle, NULL in case of failure. */ extern gcs_conn_t* gcs_create (gu::Config& conf, gcache_t* cache, gu::Progress::Callback* progress_cb, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver); /*! @brief Initialize group history values (optional). * Serves to provide group history persistence after process restart (in case * these data were saved somewhere on persistent storage or the like). If these * values are provided, it is only a hint for the group, as they might be * outdated. 
Actual seqno and UUID are returned in GCS_ACT_CCHANGE action (see * below) and are determined by quorum. * * This function must be called before gcs_open() or after gcs_close(). * * @param position Global Transaction ID corresponding to the current * application state. * Should be undefined for undefined state. * * @return 0 in case of success, -EBUSY if connection is already opened, * -EBADFD if connection object is being destroyed. */ extern long gcs_init (gcs_conn_t* conn, const gu::GTID& position); /*! @brief Opens connection to group (joins channel). * * @param conn connection object * @param channel a name of the channel to join. It must uniquely identify * the channel. If the channel with such name does not exist, * it is created. Processes that joined the same channel * receive the same actions. * @param url an URL-like string that specifies backend communication * driver in the form "TYPE://ADDRESS?options". For gcomm * backend it can be "gcomm://localhost:4567", for dummy backend * ADDRESS field is ignored. * Currently supported backend types: "dummy", "vsbes", "gcomm" * @param bootstrap bootstrap a new group * * @return negative error code, 0 in case of success. */ extern long gcs_open (gcs_conn_t *conn, const char *channel, const char *url, bool bootstrap); /*! @brief Closes connection to group. * * @param conn connection handle * @return negative error code or 0 in case of success. */ extern long gcs_close (gcs_conn_t *conn); /*! @brief Frees resources associuated with connection handle. * * @param conn connection handle * @return negative error code or 0 in case of success. */ extern long gcs_destroy (gcs_conn_t *conn); /*! @brief Deprecated. Waits until the group catches up. * This call checks if any member of the group (including this one) has a * long slave queue. Should be called before gcs_repl(), gcs_send(). * * @return negative error code, 1 if wait is required, 0 otherwise */ extern long gcs_wait (gcs_conn_t *conn); /*! 
@typedef @brief Action types. * There is a conceptual difference between "messages" * and "actions". Messages are ELEMENTARY pieces of information * atomically delivered by group communication. They are typically * limited in size to a single IP packet. Events generated by group * communication layer must be delivered as a single message. * * For the purpose of this work "action" is a higher level concept * introduced to overcome the message size limitation. Application * replicates information in actions of ARBITRARY size that are * fragmented into as many messages as needed. As such actions * can be delivered only in primary configuration, when total order * of underlying messages is established. * The best analogy for action/message concept would be word/letter. * * The purpose of GCS library is to hide message handling from application. * Therefore application deals only with "actions". * Application can only send actions of types GCS_ACT_WRITESET, * GCS_ACT_COMMIT_CUT and GCS_ACT_STATE_REQ. * Actions of type GCS_ACT_SYNC, GCS_ACT_CCHANGE are generated by the library. */ typedef enum gcs_act_type { /* ordered actions */ GCS_ACT_WRITESET, //! action representing state change, will be assigned // global seqno GCS_ACT_COMMIT_CUT, //! group-wide action commit cut GCS_ACT_STATE_REQ, //! request for state transfer GCS_ACT_CCHANGE, //! group configuration change GCS_ACT_JOIN, //! joined group (received all state data) GCS_ACT_SYNC, //! synchronized with group GCS_ACT_FLOW, //! flow control GCS_ACT_VOTE, //! vote on GTID outcome GCS_ACT_SERVICE, //! service action, sent by GCS GCS_ACT_ERROR, //! error happened while receiving the action GCS_ACT_INCONSISTENCY,//! inconsistency event GCS_ACT_UNKNOWN //! undefined/unknown action type } gcs_act_type_t; #define GCS_VOTE_REQUEST 1 /* vote request indicator */ /*! String representations of action types */ extern const char* gcs_act_type_to_str(gcs_act_type_t); /*! 
@brief Sends a vector of buffers as a single action to group and returns. * A copy of action will be returned through gcs_recv() call, or discarded * in case it is not delivered by group. * For a better means to replicate an action see gcs_repl(). @see gcs_repl() * * @param conn group connection handle * @param act_bufs action buffer vector * @param act_size total action size (the sum of buffer sizes) * @param act_type action type * @param scheduled whether the call was scheduled by gcs_schedule() * @param grab use gcs_sm_grab() instead of gcs_sm_enter() * @return negative error code, action size in case of success * @retval -EINTR thread was interrupted while waiting to enter the monitor */ extern long gcs_sendv (gcs_conn_t* conn, const struct gu_buf* act_bufs, size_t act_size, gcs_act_type_t act_type, bool scheduled, bool grab); /*! A wrapper for single buffer communication */ static inline long gcs_send (gcs_conn_t* const conn, const void* const act, size_t const act_size, gcs_act_type_t const act_type, bool const scheduled) { struct gu_buf const buf[1] = { act, static_cast(act_size) }; return gcs_sendv (conn, &(buf[0]), act_size, act_type, scheduled, false); } /*!*/ struct gcs_action { gcs_seqno_t seqno_g; gcs_seqno_t seqno_l; const void* buf; /*! unlike input, output goes as a single buffer */ int32_t size; gcs_act_type_t type; }; std::ostream& operator <<(std::ostream& os, const gcs_action& act); /*! @brief Replicates a vector of buffers as a single action. * Sends action to group and blocks until it is received. Upon return global * and local IDs are set. Arguments are the same as in gcs_recv(). 
* @see gcs_recv() * * @param conn group connection handle * @param act_in action buffer vector (total size is passed in action) * @param action action struct * @param scheduled whether the call was preceded by gcs_schedule() * @param seq_cb callback struct for signalling the caller once the * replication sequence has been established * @return negative error code, action size in case of success * @retval -EINTR: thread was interrupted while waiting to enter the monitor */ extern long gcs_replv (gcs_conn_t* conn, const struct gu_buf* act_in, struct gcs_action* action, bool scheduled, const wsrep_seq_cb_t* seq_cb ); /*! A wrapper for single buffer communication */ static inline long gcs_repl (gcs_conn_t* const conn, struct gcs_action* const action, bool const scheduled) { struct gu_buf const buf[1] = { action->buf, action->size }; return gcs_replv (conn, &(buf[0]), action, scheduled, nullptr); } /*! @brief Receives an action from group. * Blocks if no actions are available. Action buffer is allocated by GCS * and must be freed by application when action is no longer needed. * Also sets global and local action IDs. Global action ID uniquely identifies * action in the history of the group and can be used to identify the state * of the application for state snapshot purposes. Local action ID is a * monotonic gapless number sequence starting with 1 which can be used * to serialize access to critical sections. * * @param conn group connection handle * @param action action object * @return negative error code, action size in case of success, * @retval 0 on connection close */ extern long gcs_recv (gcs_conn_t* conn, struct gcs_action* action); /*! * @brief Schedules entry to CGS send monitor. * Locks send monitor and should be quickly followed by gcs_repl()/gcs_send() * * @retval 0 - won't queue * @retval >0 - queue handle * @retval -EAGAIN - too many queued threads * @retval -EBADFD - connection is closed */ extern long gcs_schedule (gcs_conn_t* conn); /*! 
* @brief Interrupt a thread waiting to enter send monitor. * * @param conn GCS connection * @param handle queue handle returned by @func gcs_schedule(). Must be > 0 * * @retval 0 success * @retval -ESRCH no such thread/already interrupted */ extern long gcs_interrupt (gcs_conn_t* conn, long handle); /*! * Resume receivng from group. * * @param conn GCS connection * * @retval 0 success * @retval -EBADFD connection is in closed state */ extern long gcs_resume_recv (gcs_conn_t* conn); /*! * After action with this gtid is applied, this thread is guaranteed to see * all the changes made by the client, even on other nodes. * * @retval 0 success * @retval -EPERM operation not permitted (in NON_PRIMARY state) * @retval -EAGAIN operation may be retried later (in transient state) */ extern long gcs_caused (gcs_conn_t* conn, gu::GTID& gtid); /*! @brief Sends state transfer request * Broadcasts state transfer request which will be passed to one of the * suitable group members. * * @param conn connection to group * @param ver STR version. * @param req opaque byte array that contains data required for * the state transfer (application dependent) * @param size request size * @param donor desired state transfer donor name. Supply empty string to * choose automatically. * @param ist_gtid where to start IST from * @param order response to request was ordered with this local order. * Must be skipped in local queues. * @return negative error code, index of state transfer donor in case of success * (notably, -EAGAIN means try later, -EHOSTUNREACH means desired donor * is unavailable) */ extern long gcs_request_state_transfer (gcs_conn_t* conn, int ver, const void* req, size_t size, const char* donor, const gu::GTID& ist_gtid, gcs_seqno_t& order); /*! @brief Turns off flow control on the node. * Effectively desynchronizes the node from the cluster (while the node keeps on * receiving all the actions). Requires gcs_join() to return to normal. 
* * @param conn connection to group * @param order response to request was ordered with this seqno. * Must be skipped in local queues. * @return negative error code, 0 in case of success. */ extern long gcs_desync (gcs_conn_t* conn, gcs_seqno_t& order); /*! @brief Informs group on behalf of donor that state stransfer is over. * If status is non-negative, joiner will be considered fully joined to group. * * @param conn opened connection to group * @param gtid containing negative error code in case of state transfer failure, * or gtid of joined state. * @return negative error code, 0 in case of success */ extern long gcs_join (gcs_conn_t *conn, const gu::GTID& gtid, int code); /*! @brief Allocate local seqno for accessing local resources. * * * @param conn connection to group * @return local seqno, negative error code in case of error */ extern gcs_seqno_t gcs_local_sequence(gcs_conn_t* conn); /////////////////////////////////////////////////////////////////////////////// /* Service functions */ /*! Informs group about the last applied action on this node */ extern long gcs_set_last_applied (gcs_conn_t* conn, const gu::GTID& gtid); /*! @return currently established GCS protocol */ extern int gcs_proto_ver(gcs_conn_t* conn); /*! @brief Vote on the error code that resulted from processing the gtid action. * * Blocks until consensus is reached or call fails. * * @param msg optional error message (should not be node-specific) * @param msg_len message length * * @return 0 for majority agrees on error, 1 for majority disagrees with error, * negative errno for technical call failure. */ extern int gcs_vote (gcs_conn_t* conn, const gu::GTID& gtid, uint64_t code, const void* msg, size_t msg_len); /* GCS Configuration */ /*! Registers configurable parameters with conf object * throws exception if error happened */ extern void gcs_register_params (gu::Config& conf); /*! 
sets the key to a given value * * @return 0 in case of success, 1 if key not found or negative error code */ extern long gcs_param_set (gcs_conn_t* conn, const char* key, const char *value); /*! returns the value of the key * * @return NULL if key not found */ extern const char* gcs_param_get (gcs_conn_t* conn, const char* key); /* Logging options */ extern long gcs_conf_set_log_file (FILE *file); extern long gcs_conf_set_log_callback (void (*logger) (int, const char*)); extern long gcs_conf_self_tstamp_on (); extern long gcs_conf_self_tstamp_off (); extern long gcs_conf_debug_on (); extern long gcs_conf_debug_off (); /* Sending options (deprecated, use gcs_param_set instead) */ /* Sets maximum DESIRED network packet size. * For best results should be multiple of MTU */ extern long gcs_conf_set_pkt_size (gcs_conn_t *conn, long pkt_size); #define GCS_DEFAULT_PKT_SIZE 64500 /* 43 Eth. frames to carry max IP packet */ /* * Configuration action */ /*! Possible node states */ typedef enum gcs_node_state { GCS_NODE_STATE_NON_PRIM, /// in non-primary configuration, outdated state GCS_NODE_STATE_PRIM, /// in primary conf, needs state transfer GCS_NODE_STATE_JOINER, /// in primary conf, receiving state transfer GCS_NODE_STATE_DONOR, /// joined, donating state transfer GCS_NODE_STATE_JOINED, /// contains full state GCS_NODE_STATE_SYNCED, /// syncronized with group GCS_NODE_STATE_MAX } gcs_node_state_t; /*! Convert state code to null-terminates string */ extern const char* gcs_node_state_to_str (gcs_node_state_t state); /*! 
New configuration action deserialized */ struct gcs_act_cchange { gcs_act_cchange(); gcs_act_cchange(const void* buf, int size); int write(void** buf) const; bool operator==(const gcs_act_cchange& other) const; struct member { member() : uuid_(), name_(), incoming_(), cached_(), state_() {} bool operator==(const member& other) const; gu_uuid_t uuid_; std::string name_; std::string incoming_; gcs_seqno_t cached_; gcs_node_state state_; }; std::vector memb; gu_uuid_t uuid; //! group UUID gcs_seqno_t seqno; //! last global seqno applied by this group gcs_seqno_t conf_id; //! configuration ID (-1 if non-primary) gcs_seqno_t vote_seqno; int64_t vote_res; int repl_proto_ver; //! replicator protocol version to use int appl_proto_ver; //! application protocol version to use }; std::ostream& operator <<(std::ostream& os, const struct gcs_act_cchange& cc); struct gcs_stats { double send_q_len_avg; //! average send queue length per send call double recv_q_len_avg; //! average recv queue length per queued action long long fc_paused_ns; //! total nanoseconds spent in paused state double fc_paused_avg; //! faction of time paused due to flow control long long fc_ssent; //! flow control stops sent long long fc_csent; //! flow control conts sent long long fc_received; //! flow control stops received size_t recv_q_size; //! current recv queue size int recv_q_len; //! current recv queue length int recv_q_len_max; //! maximum recv queue length int recv_q_len_min; //! minimum recv queue length int send_q_len; //! current send queue length int send_q_len_max; //! maximum send queue length int send_q_len_min; //! minimum send queue length int proto_appl; //! application protocol level int proto_repl; //! replicator protocol level int proto_gcs; //! GCS protocol level bool fc_active; //! flow control is currently active bool fc_requested; //! flow control is requested by this node }; /*! Fills stats struct */ extern void gcs_get_stats (gcs_conn_t *conn, struct gcs_stats* stats); /*! 
flushes stats counters */ extern void gcs_flush_stats(gcs_conn_t *conn); void gcs_get_status(gcs_conn_t* conn, gu::Status& status); /*! A node with this name will be treated as a stateless arbitrator */ #define GCS_ARBITRATOR_NAME "garb" #endif // _gcs_h_ galera-4-26.4.25/gcs/src/gcs_test.cpp000644 000164 177776 00000060171 15107057155 020403 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ /***********************************************************/ /* This program imitates 3rd party application and */ /* tests GCS library in a dummy standalone configuration */ /***********************************************************/ #include #include #include #include #include #include #include #include #include #include #include #include "gcs.hpp" #include "gcs_test.hpp" #define USE_WAIT #define gcs_malloc(a) ((a*) malloc (sizeof (a))) #define gcs_free(a) (free ((a))) static pthread_mutex_t gcs_test_lock = PTHREAD_MUTEX_INITIALIZER; static gcache_t* cache = NULL; typedef struct gcs_test_log { FILE *file; pthread_mutex_t lock; } gcs_test_log_t; #define SEND_LOG "/dev/shm/gcs_test_send.log" #define RECV_LOG "/dev/shm/gcs_test_recv.log" static gcs_test_log_t *send_log, *recv_log; static bool throughput = true; // bench for throughput static bool total = true; // also enable TO locking typedef enum { GCS_TEST_SEND, GCS_TEST_RECV, GCS_TEST_REPL } gcs_test_repl_t; typedef struct gcs_test_thread { pthread_t thread; long id; struct gcs_action act; long n_tries; void* msg; char* log_msg; } gcs_test_thread_t; #define MAX_MSG_LEN (1 << 16) static long gcs_test_thread_create (gcs_test_thread_t *t, long id, long n_tries) { t->id = id; t->msg = calloc (MAX_MSG_LEN, sizeof(char)); t->act.buf = t->msg; t->act.size = MAX_MSG_LEN; t->act.seqno_g = GCS_SEQNO_ILL; t->act.seqno_l = GCS_SEQNO_ILL; t->act.type = GCS_ACT_WRITESET; t->n_tries = n_tries; if (t->msg) { t->log_msg = (char*)calloc (MAX_MSG_LEN, sizeof(char)); if (t->log_msg) return 0; } return 
-ENOMEM; } static long gcs_test_thread_destroy (gcs_test_thread_t *t) { if (t->msg) free (t->msg); if (t->log_msg) free (t->log_msg); return 0; } typedef struct gcs_test_thread_pool { long n_threads; long n_tries; long n_started; gcs_test_repl_t type; gcs_test_thread_t *threads; } gcs_test_thread_pool_t; static long gcs_test_thread_pool_create (gcs_test_thread_pool_t *pool, const gcs_test_repl_t type, const long n_threads, const long n_tries) { long err = 0; long i; // pool = gcs_malloc (gcs_test_thread_pool_t); // if (!pool) { err = errno; goto out; } pool->n_threads = n_threads; pool->type = type; pool->n_tries = n_tries; pool->n_started = 0; pool->threads = (gcs_test_thread_t *) calloc (pool->n_threads, sizeof (gcs_test_thread_t)); if (!pool->threads) { err = errno; fprintf (stderr, "Failed to allocate %ld thread objects: %ld (%s)\n", n_threads, err, strerror(err)); goto out1; } for (i = 0; i < pool->n_threads; i++) { if ((err = gcs_test_thread_create (pool->threads + i, i, n_tries))) { err = errno; fprintf (stderr, "Failed to create thread object %ld: %ld (%s)\n", i, err, strerror(err)); goto out2; } } // printf ("Created %ld thread objects\n", i); return 0; out2: while (i) { i--; gcs_test_thread_destroy (pool->threads + i); } free (pool->threads); out1: free (pool); //out: return err; } static void gcs_test_thread_pool_destroy (gcs_test_thread_pool_t* pool) { long i; if (pool->threads) { for (i = 0; i < pool->n_threads; i++) { gcs_test_thread_destroy (pool->threads + i); } free (pool->threads); } } static pthread_mutex_t make_msg_lock = PTHREAD_MUTEX_INITIALIZER; //static long total_tries; static inline long test_make_msg (char* msg, const long mlen) { static gcs_seqno_t count = 1; long len = 0; if (!throughput) { pthread_mutex_lock (&make_msg_lock); count++; pthread_mutex_unlock (&make_msg_lock); len = snprintf (msg, mlen, "%10d %9llu %s", rand(), (unsigned long long)count++, gcs_test_data); } else { len = rand() % mlen + 1; // just random length, we don't 
care about // contents } if (len >= mlen) return mlen; else return len; } static long test_log_open (gcs_test_log_t **log, const char *name) { char real_name[1024]; gcs_test_log_t *l = gcs_malloc (gcs_test_log_t); if (!l) return errno; snprintf (real_name, 1024, "%s.%lld", name, (long long)getpid()); // cppcheck-suppress memleak if (!(l->file = fopen (real_name, "w"))) { gu_free(l); *log = NULL; return errno; } pthread_mutex_init (&l->lock, NULL); *log = l; return 0; } static long test_log_close (gcs_test_log_t **log) { long err = 0; gcs_test_log_t *l = *log; if (l) { pthread_mutex_lock (&l->lock); err = fclose (l->file); pthread_mutex_unlock (&l->lock); pthread_mutex_destroy (&l->lock); } return err; } static inline long gcs_test_log_msg (gcs_test_log_t *log, const char *msg) { long err = 0; err = fprintf (log->file, "%s\n", msg); return err; } gcs_conn_t *gcs = NULL; gu_to_t *to = NULL; long msg_sent = 0; long msg_recvd = 0; long msg_repld = 0; long msg_len = 0; size_t size_sent = 0; size_t size_repld = 0; size_t size_recvd = 0; static inline long test_recv_log_create(gcs_test_thread_t* thread) { return snprintf (thread->log_msg, MAX_MSG_LEN - 1, "Thread %3ld(REPL): act_id = %lld, local_act_id = %lld, " "len = %lld: %s", thread->id, (long long)thread->act.seqno_g, (long long)thread->act.seqno_l, (long long)thread->act.size, (const char*)thread->act.buf); } static inline long test_send_log_create(gcs_test_thread_t* thread) { return snprintf (thread->log_msg, MAX_MSG_LEN - 1, "Thread %3ld (REPL): len = %lld, %s", thread->id, (long long) thread->act.size, (const char*)thread->act.buf); } static inline long test_log_msg (gcs_test_log_t* log, const char* msg) { long ret; pthread_mutex_lock (&log->lock); ret = fprintf (recv_log->file, "%s\n", msg); pthread_mutex_lock (&log->lock); return ret; } static inline long test_log_in_to (gu_to_t* to, gcs_seqno_t seqno, const char* msg) { long ret = 0; while ((ret = gu_to_grab (to, seqno)) == -EAGAIN) usleep(10000); if (!ret) 
{// success if (msg != NULL) gcs_test_log_msg (recv_log, msg); ret = gu_to_release (to, seqno); } return ret; } static gcs_seqno_t group_seqno = 0; static gu::UUID group_uuid; static inline long test_send_last_applied (gcs_conn_t* gcs, gcs_seqno_t my_seqno) { long ret = 0; #define SEND_LAST_MASK ((1 << 14) - 1) // every 16K seqno if (!(my_seqno & SEND_LAST_MASK)) { ret = gcs_set_last_applied (gcs, gu::GTID(group_uuid, my_seqno)); if (ret) { fprintf (stderr,"gcs_set_last_applied(%lld) returned %ld\n", (long long)my_seqno, ret); } // if (!throughput) { fprintf (stdout, "Last applied: my = %lld, group = %lld\n", (long long)my_seqno, (long long)group_seqno); // } } return ret; } static inline long test_before_send (gcs_test_thread_t* thread) { #ifdef USE_WAIT static const struct timespec wait = { 0, 10000000 }; #endif long ret = 0; /* create a message */ thread->act.size = test_make_msg ((char*)thread->msg, msg_len); thread->act.buf = thread->msg; if (thread->act.size <= 0) return -1; if (!throughput) { /* log message before replication */ ret = test_send_log_create (thread); ret = test_log_msg (send_log, thread->log_msg); } #ifdef USE_WAIT while ((ret = gcs_wait(gcs)) && ret > 0) nanosleep (&wait, NULL); #endif return ret; } static inline long test_after_recv (gcs_test_thread_t* thread) { long ret; if (!throughput) { /* log message after replication */ ret = test_recv_log_create (thread); ret = test_log_in_to (to, thread->act.seqno_l, thread->log_msg); } else if (total) { ret = test_log_in_to (to, thread->act.seqno_l, NULL); } else { gu_to_self_cancel (to, thread->act.seqno_l); } ret = test_send_last_applied (gcs, thread->act.seqno_g); // fprintf (stdout, "SEQNO applied %lld", thread->local_act_id); if (thread->act.type == GCS_ACT_WRITESET) gcache_free (cache, thread->act.buf); return ret; } void *gcs_test_repl (void *arg) { gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; // long i = thread->n_tries; long ret = 0; pthread_mutex_lock (&gcs_test_lock); 
pthread_mutex_unlock (&gcs_test_lock); while (thread->n_tries) { ret = test_before_send (thread); if (ret < 0) break; /* replicate message */ ret = gcs_repl (gcs, &thread->act, false); if (ret < 0) { assert (thread->act.seqno_g == GCS_SEQNO_ILL); assert (thread->act.seqno_l == GCS_SEQNO_ILL); break; } msg_repld++; size_repld += thread->act.size; // usleep ((rand() & 1) << 1); test_after_recv (thread); // puts (thread->log_msg); fflush (stdout); } // fprintf (stderr, "REPL thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } void *gcs_test_send (void *arg) { long ret = 0; gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; // long i = thread->n_tries; pthread_mutex_lock (&gcs_test_lock); pthread_mutex_unlock (&gcs_test_lock); while (thread->n_tries) { ret = test_before_send (thread); if (ret < 0) break; /* send message to group */ ret = gcs_send (gcs, thread->act.buf, thread->act.size, GCS_ACT_WRITESET, false); if (ret < 0) break; //sleep (1); msg_sent++; size_sent += thread->act.size; } // fprintf (stderr, "SEND thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } static void gcs_test_handle_configuration (gcs_conn_t* gcs, gcs_test_thread_t* thread) { long ret; static gcs_seqno_t conf_id = 0; gcs_act_cchange const conf(thread->act.buf, thread->act.size); int const my_idx(thread->act.seqno_g); gcs_node_state my_state(conf.memb[my_idx].state_); gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; fprintf (stdout, "Got GCS_ACT_CCHANGE: Conf: %lld, " "seqno: %lld, members: %zd, my idx: %d, local seqno: %lld\n", (long long)conf.conf_id, (long long)conf.seqno, conf.memb.size(), my_idx, (long long)thread->act.seqno_l); fflush (stdout); // NOTE: what really needs to be checked is seqno and group_uuid, but here // we don't keep track of them (and don't do real transfers), // so for simplicity, just check conf_id. 
while (-EAGAIN == (ret = gu_to_grab (to, thread->act.seqno_l))); if (0 == ret) { group_uuid = conf.uuid; if (my_state == GCS_NODE_STATE_PRIM) { gcs_seqno_t seqno, s; fprintf (stdout,"Gap in configurations: ours: %lld, group: %lld.\n", (long long)conf_id, (long long)conf.conf_id); fflush (stdout); int err(gcs_request_state_transfer(gcs, 0, &conf.seqno, sizeof(conf.seqno),"", gu::GTID(ist_uuid, ist_seqno), seqno)); fprintf (stdout, "Requesting state transfer up to %lld: %s\n", (long long)conf.seqno, // this is global seqno strerror (-err)); // pretend that state transfer is complete, cancel every action up // to seqno for (s = thread->act.seqno_l + 1; s <= seqno; s++) { gu_to_self_cancel (to, s); // this is local seqno } fprintf (stdout, "Sending JOIN: %s\n", strerror(-gcs_join(gcs, gu::GTID(group_uuid, seqno), 0))); fflush (stdout); } gcs_resume_recv (gcs); gu_to_release (to, thread->act.seqno_l); } else { fprintf (stderr, "Failed to grab TO: %ld (%s)", ret, strerror(ret)); } conf_id = conf.conf_id; } void *gcs_test_recv (void *arg) { long ret = 0; gcs_test_thread_t *thread = (gcs_test_thread_t*)arg; while (thread->n_tries) { /* receive message from group */ while ((ret = gcs_recv (gcs, &thread->act)) == -ECANCELED) { usleep (10000); } if (ret <= 0) { fprintf (stderr, "gcs_recv() %s: %ld (%s). Thread exits.\n", ret < 0 ? 
"failed" : "connection closed", ret, strerror(-ret)); assert (thread->act.buf == NULL); assert (thread->act.size == 0); assert (thread->act.seqno_g == GCS_SEQNO_ILL); assert (thread->act.seqno_l == GCS_SEQNO_ILL); assert (thread->act.type == GCS_ACT_ERROR); break; } assert (thread->act.type < GCS_ACT_ERROR); msg_recvd++; size_recvd += thread->act.size; switch (thread->act.type) { case GCS_ACT_WRITESET: test_after_recv (thread); //puts (thread->log_msg); fflush (stdout); break; case GCS_ACT_COMMIT_CUT: group_seqno = *(gcs_seqno_t*)thread->act.buf; gu_to_self_cancel (to, thread->act.seqno_l); break; case GCS_ACT_CCHANGE: gcs_test_handle_configuration (gcs, thread); break; case GCS_ACT_STATE_REQ: fprintf (stdout, "Got STATE_REQ\n"); gu_to_grab (to, thread->act.seqno_l); fprintf (stdout, "Sending JOIN: %s\n", strerror(-gcs_join(gcs, gu::GTID(group_uuid, group_seqno), 0))); fflush (stdout); gu_to_release (to, thread->act.seqno_l); break; case GCS_ACT_JOIN: fprintf (stdout, "Joined\n"); gu_to_self_cancel (to, thread->act.seqno_l); break; case GCS_ACT_SYNC: fprintf (stdout, "Synced\n"); gu_to_self_cancel (to, thread->act.seqno_l); break; default: fprintf (stderr, "Unexpected action type: %d\n", thread->act.type); } } // fprintf (stderr, "RECV thread %ld exiting: %s\n", // thread->id, strerror(-ret)); return NULL; } static long gcs_test_thread_pool_start (gcs_test_thread_pool_t *pool) { long i; long err = 0; void * (* thread_routine) (void *); switch (pool->type) { case GCS_TEST_REPL: thread_routine = gcs_test_repl; break; case GCS_TEST_SEND: thread_routine = gcs_test_send; break; case GCS_TEST_RECV: thread_routine = gcs_test_recv; break; default: fprintf (stderr, "Bad repl type %u\n", pool->type); return -1; } for (i = 0; i < pool->n_threads; i++) { if ((err = pthread_create (&pool->threads[i].thread, NULL, thread_routine, &pool->threads[i]))) break; } pool->n_started = i; printf ("Started %ld threads of %s type (pool: %p)\n", pool->n_started, GCS_TEST_REPL == pool->type 
? "REPL" : (GCS_TEST_SEND == pool->type ? "SEND" :"RECV"), (void*)pool); return 0; } static long gcs_test_thread_pool_join (const gcs_test_thread_pool_t *pool) { long i; for (i = 0; i < pool->n_started; i++) { pthread_join (pool->threads[i].thread, NULL); } return 0; } static long gcs_test_thread_pool_stop (const gcs_test_thread_pool_t *pool) { long i; for (i = 0; i < pool->n_started; i++) { pool->threads[i].n_tries = 0; } return 0; } long gcs_test_thread_pool_cancel (const gcs_test_thread_pool_t *pool) { long i; printf ("Canceling pool: %p\n", (void*)pool); fflush(stdout); printf ("pool type: %u, pool threads: %ld\n", pool->type, pool->n_started); fflush(stdout); for (i = 0; i < pool->n_started; i++) { printf ("Cancelling %ld\n", i); fflush(stdout); pthread_cancel (pool->threads[i].thread); pool->threads[i].n_tries = 0; } return 0; } typedef struct gcs_test_conf { long n_tries; long n_repl; long n_send; long n_recv; const char* backend; } gcs_test_conf_t; static const char* DEFAULT_BACKEND = "dummy://"; static long gcs_test_conf (gcs_test_conf_t *conf, long argc, char *argv[]) { char *endptr; /* defaults */ conf->n_tries = 10; conf->n_repl = 10; conf->n_send = 0; conf->n_recv = 1; conf->backend = DEFAULT_BACKEND; switch (argc) { case 6: conf->n_recv = strtol (argv[5], &endptr, 10); if ('\0' != *endptr) goto error; // fall through case 5: conf->n_send = strtol (argv[4], &endptr, 10); if ('\0' != *endptr) goto error; // fall through case 4: conf->n_repl = strtol (argv[3], &endptr, 10); if ('\0' != *endptr) goto error; // fall through case 3: conf->n_tries = strtol (argv[2], &endptr, 10); if ('\0' != *endptr) goto error; // fall through case 2: conf->backend = argv[1]; break; default: break; } printf ("Config: n_tries = %ld, n_repl = %ld, n_send = %ld, n_recv = %ld, " "backend = %s\n", conf->n_tries, conf->n_repl, conf->n_send, conf->n_recv, conf->backend); return 0; error: printf ("Usage: %s [backend] [tries:%ld] [repl threads:%ld] " "[send threads: %ld] [recv 
threads: %ld]\n", argv[0], conf->n_tries, conf->n_repl, conf->n_send, conf->n_recv); exit (EXIT_SUCCESS); } static inline void test_print_stat (long msgs, size_t size, double interval) { printf ("%7ld (%7.1f per sec.) / %7zuKb (%7.1f Kb/s)\n", msgs, (double)msgs/interval, size >> 10, (double)(size >> 10)/interval); } int main (int argc, char *argv[]) { long err = 0; gcs_test_conf_t conf; gcs_test_thread_pool_t repl_pool, send_pool, recv_pool; const char *channel = "my_channel"; struct timeval t_begin, t_end; gu_config_t* gconf; bool bstrap; try { gcs_conf_debug_on(); // turn on debug messages if ((err = gcs_test_conf (&conf, argc, argv))) goto out; if (!throughput) { if ((err = test_log_open (&send_log, SEND_LOG))) goto out; if ((err = test_log_open (&recv_log, RECV_LOG))) goto out; } to = gu_to_create ((conf.n_repl + conf.n_recv + 1)*2, GCS_SEQNO_FIRST); if (!to) goto out; // total_tries = conf.n_tries * (conf.n_repl + conf.n_send); printf ("Opening connection: channel = %s, backend = %s\n", channel, conf.backend); gconf = gu_config_create (); if (!gconf) goto out; gcache::GCache::register_params(*reinterpret_cast(gconf)); gu_config_set_string(gconf, "gcache.size", "0"); gu_config_set_string(gconf, "gcache.page_size", "1M"); gcs_register_params(*reinterpret_cast(gconf)); if (!(cache = gcache_create (gconf, ""))) goto out; if (!(gcs = gcs_create (*reinterpret_cast(gconf), cache, NULL, NULL, NULL, 0, 0))) goto out; puts ("debug"); fflush(stdout); /* the following hack won't work if there is 0.0.0.0 in URL options */ bstrap = (NULL != strstr(conf.backend, "0.0.0.0")); if ((err = gcs_open (gcs, channel, conf.backend, bstrap))) goto out; printf ("Connected\n"); msg_len = 1300; if (msg_len > MAX_MSG_LEN) msg_len = MAX_MSG_LEN; gcs_conf_set_pkt_size (gcs, 7570); // to test fragmentation if ((err = gcs_test_thread_pool_create (&repl_pool, GCS_TEST_REPL, conf.n_repl, conf.n_tries))) goto out; if ((err = gcs_test_thread_pool_create (&send_pool, GCS_TEST_SEND, conf.n_send, 
conf.n_tries))) goto out; if ((err = gcs_test_thread_pool_create (&recv_pool, GCS_TEST_RECV, conf.n_recv, conf.n_tries))) goto out; pthread_mutex_lock (&gcs_test_lock); gcs_test_thread_pool_start (&recv_pool); gcs_test_thread_pool_start (&repl_pool); gcs_test_thread_pool_start (&send_pool); printf ("Press any key to start the load:"); fgetc (stdin); puts ("Started load."); gettimeofday (&t_begin, NULL); printf ("Waiting for %ld seconds\n", conf.n_tries); fflush (stdout); pthread_mutex_unlock (&gcs_test_lock); usleep (conf.n_tries*1000000); puts ("Stopping SEND and REPL threads..."); fflush(stdout); fflush(stderr); gcs_test_thread_pool_stop (&send_pool); gcs_test_thread_pool_stop (&repl_pool); puts ("Threads stopped."); gcs_test_thread_pool_join (&send_pool); gcs_test_thread_pool_join (&repl_pool); puts ("SEND and REPL threads joined."); printf ("Closing GCS connection... "); if ((err = gcs_close (gcs))) goto out; puts ("done."); gcs_test_thread_pool_join (&recv_pool); puts ("RECV threads joined."); gettimeofday (&t_end, NULL); { double interval = (t_end.tv_sec - t_begin.tv_sec) + 0.000001*t_end.tv_usec - 0.000001*t_begin.tv_usec; printf ("Actions sent: "); test_print_stat (msg_sent, size_sent, interval); printf ("Actions received: "); test_print_stat (msg_recvd, size_recvd, interval); printf ("Actions replicated: "); test_print_stat (msg_repld, size_repld, interval); puts("---------------------------------------------------------------"); printf ("Total throughput: "); test_print_stat (msg_repld + msg_recvd, size_repld + size_recvd, interval); printf ("Overhead at 10000 actions/sec: %5.2f%%\n", 1000000.0 * interval / (msg_repld + msg_recvd)); puts(""); } printf ("Press any key to exit the program:\n"); fgetc (stdin); printf ("Freeing GCS connection handle..."); if ((err = gcs_destroy (gcs))) goto out; gcs = NULL; printf ("done\n"); fflush (stdout); printf ("Destroying GCache object:\n"); gcache_destroy (cache); gcs_test_thread_pool_destroy (&repl_pool); 
gcs_test_thread_pool_destroy (&send_pool); gcs_test_thread_pool_destroy (&recv_pool); gu_to_destroy(&to); if (!throughput) { printf ("Closing send log\n"); test_log_close (&send_log); printf ("Closing recv log\n"); test_log_close (&recv_log); } { ssize_t total; ssize_t allocs; ssize_t reallocs; ssize_t deallocs; void gu_mem_stats (ssize_t*, ssize_t*, ssize_t*, ssize_t*); gu_mem_stats (&total, &allocs, &reallocs, &deallocs); printf ("Memory statistics:\n" "Memory still allocated: %10lld\n" "Times allocated: %10lld\n" "Times reallocated: %10lld\n" "Times freed: %10lld\n", (long long)total, (long long)allocs, (long long)reallocs, (long long)deallocs); } } catch (gu::UUIDScanException& u) { printf("UUIDScanException: %d\n", u.get_errno()); } catch (gu::NotFound& nf) { printf("NotFound exception\n"); } catch (gu::NotSet& ns) { printf("NotSet exception\n"); } catch (gu::Exception& e) { printf("Exception caught: %d : %s\n", e.get_errno(), e.what()); } catch (std::exception& e) { printf("Exception caught: %s\n", e.what()); } catch (...) { printf("Error : unknown exception happened.\n"); } return 0; out: printf ("Error: %ld (%s)\n", err, strerror (-err)); return err; } galera-4-26.4.25/gcs/src/CMakeLists.txt000644 000164 177776 00000002662 15107057155 020625 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # set(GCS_SOURCES gcs_act_cchange.cpp gcs_code_msg.cpp gcs_params.cpp gcs_conf.cpp gcs_fifo_lite.cpp gcs_msg_type.cpp gcs_comp_msg.cpp gcs_sm.cpp gcs_backend.cpp gcs_dummy.cpp gcs_act_proto.cpp gcs_defrag.cpp gcs_state_msg.cpp gcs_node.cpp gcs_group.cpp gcs_core.cpp gcs_fc.cpp gcs.cpp gcs_gcomm.cpp gcs_error.cpp ) # # Gcs library. # add_library(gcs STATIC ${GCS_SOURCES}) target_compile_definitions(gcs PRIVATE -DGALERA_LOG_H_ENABLE_CXX -DGCS_USE_GCOMM) if (GALERA_GCS_SM_DEBUG) target_compile_definitions(gcs PRIVATE -DGCS_SM_DEBUG) endif() # TODO: Fix. 
target_compile_options(gcs PRIVATE -Wno-conversion -Wno-unused-parameter -Wno-overloaded-virtual -Wno-vla ) target_link_libraries(gcs gcomm gcache) # # Gcs library for Garb daemon. # add_library(gcs4garb STATIC ${GCS_SOURCES}) target_compile_definitions(gcs4garb PRIVATE -DGCS_FOR_GARB -DGALERA_LOG_H_ENABLE_CXX -DGCS_USE_GCOMM ) # TODO: Fix. target_compile_options(gcs4garb PRIVATE -Wno-conversion -Wno-unused-parameter -Wno-overloaded-virtual -Wno-vla ) target_link_libraries(gcs4garb gcomm gcache) # # Gcs test program, must be run manually. # add_executable(gcs_test gcs_test.cpp) target_compile_options(gcs_test PRIVATE -Wno-conversion -Wno-unused-parameter) target_link_libraries(gcs_test gcs gcomm) add_subdirectory(unit_tests) galera-4-26.4.25/gcs/src/gcs_spread.cpp000644 000164 177776 00000046423 15107057155 020706 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*****************************************/ /* Implementation of Spread GC backend */ /*****************************************/ #include #include #include #include #include #include #include "gcs_spread.h" #include "gcs_comp_msg.h" #define SPREAD_MAX_GROUPS 256 #if (GCS_COMP_MEMB_ID_MAX_LEN < MAX_GROUP_NAME) #error "GCS_COMP_MEMB_ID_MAX_LEN is smaller than Spread's MAX_GROUP_NAME" #error "This can make creation of component message impossible." 
#endif typedef struct string_array { int32 max_strings; int32 num_strings; char strings[0][MAX_GROUP_NAME]; } string_array_t; static string_array_t* string_array_alloc (const long n) { string_array_t *ret = NULL; ret = gu_malloc (sizeof (string_array_t) + n * MAX_GROUP_NAME); if (ret) { ret->max_strings = n; ret->num_strings = 0; } return ret; } static void string_array_free (string_array_t *a) { gu_free (a); } typedef enum spread_config { SPREAD_REGULAR, SPREAD_TRANSITIONAL } spread_config_t; typedef struct gcs_backend_conn { char *socket; char *channel; char *priv_name; char *priv_group; char *sender; long msg_type; long my_id; /* process ID returned with REG_MEMB message */ long config_id; // long memb_num; string_array_t *memb; string_array_t *groups; gcs_comp_msg_t *comp_msg; spread_config_t config; /* type of configuration: regular or trans */ mailbox mbox; } spread_t; /* this function converts socket address from conventional * "addr:port" notation to Spread's "port@addr" notation */ static long gcs_to_spread_socket (const char const *socket, char **sp_socket) { char *colon = strrchr (socket, ':'); size_t addr_len = colon - socket; size_t port_len = strlen (socket) - addr_len - 1; char *sps = NULL; if (!colon) return -EADDRNOTAVAIL; sps = (char *) strdup (socket); if (!sps) return -ENOMEM; memcpy (sps, colon+1, port_len); memcpy (sps + port_len + 1, socket, addr_len); sps[port_len] = '@'; *sp_socket = sps; return 0; } static const char* spread_default_socket = "localhost:4803"; static long spread_create (spread_t** spread, const char* socket) { long err = 0; spread_t *sp = GU_CALLOC (1, spread_t); *spread = NULL; if (!sp) { err = -ENOMEM; goto out0; } if (NULL == socket || strlen(socket) == 0) socket = spread_default_socket; err = gcs_to_spread_socket (socket, &sp->socket); if (err < 0) { goto out1; } sp->priv_name = GU_CALLOC (MAX_PRIVATE_NAME, char); if (!sp->priv_name) { err = -ENOMEM; goto out3; } sp->priv_group = GU_CALLOC (MAX_GROUP_NAME, char); if 
(!sp->priv_group) { err = -ENOMEM; goto out4; } sp->sender = GU_CALLOC (MAX_GROUP_NAME, char); if (!sp->sender) { err = -ENOMEM; goto out5; } sp->groups = string_array_alloc (SPREAD_MAX_GROUPS); if (!sp->groups) { err = -ENOMEM; goto out6; } sp->memb = string_array_alloc (SPREAD_MAX_GROUPS); if (!sp->memb) { err = -ENOMEM; goto out7; } sp->config = SPREAD_TRANSITIONAL; sp->config_id = -1; sp->comp_msg = NULL; gu_debug ("sp->priv_group: %p", sp->priv_group); *spread = sp; return err; out7: string_array_free (sp->groups); out6: gu_free (sp->sender); out5: gu_free (sp->priv_group); out4: gu_free (sp->priv_name); out3: free (sp->socket); out1: gu_free (sp); out0: return err; } /* Compiles a string of MAX_PRIVATE_NAME characters out of a supplied string and a number, returns -1 if digits overflow */ long spread_priv_name (char *name, const char *string, long n) { /* must make sure that it does not overflow MAX_PRIVATE_NAME */ long max_digit = 2; long max_string = MAX_PRIVATE_NAME - max_digit; long len = snprintf (name, max_string + 1, "%s", string); if (len > max_string) len = max_string; // truncated gu_debug ("len = %d, max_string = %d, MAX_PRIVATE_NAME = %d\n", len, (int)max_string, MAX_PRIVATE_NAME); len = snprintf (name + len, max_digit + 1, "_%d", (int)n); if (len > max_digit) return -1; // overflow return 0; } static GCS_BACKEND_CLOSE_FN(spread_close) { long err = 0; spread_t *spread = backend->conn; if (!spread) return -EBADFD; err = SP_leave (spread->mbox, spread->channel); if (err) { switch (err) { case ILLEGAL_GROUP: return -EADDRNOTAVAIL; case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } else { return 0; } } static GCS_BACKEND_DESTROY_FN(spread_destroy) { long err = 0; spread_t *spread = backend->conn; if (!spread) return -EBADFD; err = SP_disconnect (spread->mbox); if (spread->memb) string_array_free (spread->memb); if (spread->groups) string_array_free (spread->groups); if (spread->sender) 
gu_free (spread->sender); if (spread->priv_name) gu_free (spread->priv_name); if (spread->priv_group) gu_free (spread->priv_group); if (spread->channel) free (spread->channel); // obtained by strdup() if (spread->socket) free (spread->socket); if (spread->comp_msg) gcs_comp_msg_delete(spread->comp_msg); gu_free (spread); backend->conn = NULL; if (err) { switch (err) { case ILLEGAL_GROUP: return -EADDRNOTAVAIL; case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } else { return 0; } } static GCS_BACKEND_SEND_FN(spread_send) { long ret = 0; spread_t *spread = backend->conn; if (SPREAD_TRANSITIONAL == spread->config) return -EAGAIN; /* can it be that not all of the message is sent? */ ret = SP_multicast (spread->mbox, // mailbox SAFE_MESS, // service type spread->channel, // destination group (short)msg_type, // message from application len, // message length (const char*)buf // message buffer ); if (ret != len) { if (ret > 0) return -ECONNRESET; /* Failed to send the whole message */ switch (ret) { case ILLEGAL_SESSION: return -ENOTCONN; case CONNECTION_CLOSED: return -ECONNRESET; default: return -EOPNOTSUPP; } } #ifdef GCS_DEBUG_SPREAD // gu_debug ("spread_send: message sent: %p, len: %d\n", buf, ret); #endif return ret; } /* Substitutes old member array for new (taken from groups), * creates new groups buffer. */ static inline long spread_update_memb (spread_t* spread) { string_array_t* new_groups = string_array_alloc (SPREAD_MAX_GROUPS); if (!new_groups) return -ENOMEM; string_array_free (spread->memb); spread->memb = spread->groups; spread->groups = new_groups; return 0; } /* Temporarily this is done by simple iteration through the whole list. * for a cluster of 2-3 nodes this is probably most optimal. * But it clearly needs to be improved. 
*/ static inline long spread_sender_id (const spread_t* const spread, const char* const sender_name) { long id; for (id = 0; id < spread->memb->num_strings; id++) { if (!strncmp(sender_name, spread->memb->strings[id], MAX_GROUP_NAME)) return id; } return GCS_SENDER_NONE; } static gcs_comp_msg_t* spread_comp_create (long my_id, long config_id, long memb_num, char names[][MAX_GROUP_NAME]) { gcs_comp_msg_t* comp = gcs_comp_msg_new (memb_num > 0, my_id, memb_num); long ret = -ENOMEM; if (comp) { long i; for (i = 0; i < memb_num; i++) { ret = gcs_comp_msg_add (comp, names[i]); if (ret != i) { gcs_comp_msg_delete (comp); goto fatal; } } gu_debug ("Created a component message of length %d.", gcs_comp_msg_size(comp)); return comp; } fatal: gu_fatal ("Failed to allocate component message: %s", strerror(-ret)); return NULL; } /* This function actually finalizes component message delivery: * it makes sure that the caller will receive the message and only then * changes handle state (spread->config)*/ static long spread_comp_deliver (spread_t* spread, void* buf, long len, gcs_msg_type_t* msg_type) { long ret; assert (spread->comp_msg); ret = gcs_comp_msg_size (spread->comp_msg); if (ret <= len) { memcpy (buf, spread->comp_msg, ret); spread->config = SPREAD_REGULAR; gcs_comp_msg_delete (spread->comp_msg); spread->comp_msg = NULL; *msg_type = GCS_MSG_COMPONENT; gu_debug ("Component message delivered (length %ld)", ret); } else { // provided buffer is too small for a message: // simply return required size } return ret; } static GCS_BACKEND_RECV_FN(spread_recv) { long ret = 0; spread_t *spread = backend->conn; service serv_type; int16 mess_type; int32 endian_mismatch; /* in case of premature exit */ *sender_idx = GCS_SENDER_NONE; *msg_type = GCS_MSG_ERROR; if (spread->comp_msg) { /* undelivered regular component message */ return spread_comp_deliver (spread, buf, len, msg_type); } if (!len) { // Spread does not seem to tolerate 0-sized buffer return 4096; } while (1) /* Loop 
while we don't receive the right message */ { ret = SP_receive (spread->mbox, // mailbox/connection &serv_type, // service type: // REGULAR_MESS/MEMBERSHIP_MESS spread->sender, // private group name of a sender spread->groups->max_strings, &spread->groups->num_strings, spread->groups->strings, &mess_type, // app. defined message type &endian_mismatch, len, // maximum message length (char*)buf // message buffer ); // gcs_log ("gcs_spread_recv: SP_receive returned\n"); // gcs_log ("endian_mismatch = %d\n", endian_mismatch); // /* seems there is a bug in either libsp or spread daemon */ // if (spread->groups->num_strings < 0 && ret > 0) // ret = GROUPS_TOO_SHORT; /* First, handle errors */ if (ret < 0) { switch (ret) { case BUFFER_TOO_SHORT: { if (Is_membership_mess (serv_type)) { // Ignore this error as membership messages don't fill // the buffer. Spread seems to have a bug - it returns // BUFFER_TOO_SHORT if you pass zero-length buffer for it. gu_debug ("BUFFER_TOO_SHORT in membership message."); ret = 0; break; } /* return required buffer size to caller */ gu_debug ("Error in SP_receive: BUFFER_TOO_SHORT"); gu_debug ("Supplied buffer len: %d, required: %d", len, (int) -endian_mismatch); gu_debug ("Message type: %d, sender: %d", mess_type, spread_sender_id (spread, spread->sender)); return -endian_mismatch; } case GROUPS_TOO_SHORT: { /* reallocate groups */ size_t num_groups = -spread->groups->num_strings; gu_warn ("Error in SP_receive: GROUPS_TOO_SHORT. " "Expect failure."); string_array_free (spread->groups); spread->groups = string_array_alloc (num_groups); if (!spread->groups) return -ENOMEM; /* try again */ continue; } case ILLEGAL_SESSION: gu_debug ("Error in SP_receive: ILLEGAL_SESSION"); return -ECONNABORTED; case CONNECTION_CLOSED: gu_debug ("Error in SP_receive: CONNECTION_CLOSED"); return -ECONNABORTED; case ILLEGAL_MESSAGE: gu_debug ("Error in SP_receive: ILLEGAL_MESSAGE"); continue; // wait for a legal one? 
default: gu_fatal ("unknown error = %d", ret); return -ENOTRECOVERABLE; } } /* At this point message was successfully received * and stored in buffer. */ if (Is_regular_mess (serv_type)) { // gu_debug ("received REGULAR message of type %d\n", // mess_type); assert (endian_mismatch >= 0); /* BUFFER_TOO_SMALL * must be handled before */ if (endian_mismatch) { gu_debug ("Spread returned ENDIAN_MISMATCH. Ignored."); } *msg_type = mess_type; *sender_idx = spread_sender_id (spread, spread->sender); assert (*sender_idx >= 0); assert (*sender_idx < spread->memb->num_strings); break; } else if (Is_membership_mess (serv_type)) { if (strncmp (spread->channel, spread->sender, MAX_GROUP_NAME)) continue; // wrong group/channel if (Is_transition_mess (serv_type)) { spread->config = SPREAD_TRANSITIONAL; gu_info ("Received TRANSITIONAL message"); continue; } else if (Is_reg_memb_mess (serv_type)) { //assert (spread->groups->num_strings > 0); spread->my_id = mess_type; gu_info ("Received REGULAR MEMBERSHIP " "in group \'%s\' with %d(%d) members " "where I'm member %d\n", spread->sender, spread->groups->num_strings, spread->groups->max_strings, spread->my_id); spread->config_id++; gu_debug ("Configuration number: %d", spread->config_id); spread->comp_msg = spread_comp_create (spread->my_id, spread->config_id, spread->groups->num_strings, spread->groups->strings); if (!spread->comp_msg) return -ENOTRECOVERABLE; /* Update membership info */ if ((ret = spread_update_memb(spread))) return ret; if (Is_caused_join_mess (serv_type)) { gu_info ("due to JOIN"); } else if (Is_caused_leave_mess (serv_type)) { gu_info ("due to LEAVE"); } else if (Is_caused_disconnect_mess (serv_type)) { gu_info ("due to DISCONNECT"); } else if (Is_caused_network_mess (serv_type)) { gu_info ("due to NETWORK"); } else { gu_warn ("unknown REG_MEMB message"); } ret = spread_comp_deliver (spread, buf, len, msg_type); } else if (Is_caused_leave_mess (serv_type)) { gu_info ("received SELF LEAVE message"); // *msg_type 
= GCS_MSG_COMPONENT; // memset (buf, 0, len); // trivial component spread->comp_msg = gcs_comp_msg_leave (); ret = spread_comp_deliver (spread, buf, len, msg_type); } else { gu_warn ("received unknown MEMBERSHIP message"); continue; // must do something ??? } } else if (Is_reject_mess (serv_type)) { gu_info ("received REJECTED message form %s", spread->sender); continue; } else /* Unknown message type */ { gu_warn ("received message of unknown type"); continue; } /* If we reached this point we have successfully received a message */ break; } /* message is already in buf and its length in ret */ return ret; } static GCS_BACKEND_NAME_FN(spread_name) { static char str[128]; int maj, min, patch; SP_version (&maj, &min, &patch); snprintf (str, 128, "Spread %d.%d.%d", maj, min, patch); return str; } /* Spread packet structure seem to be: * 42 bytes - Ethernet + IP + UDP header, 32 bytes Spread packet header + * 80 byte Spread message header present only in the first packet */ static GCS_BACKEND_MSG_SIZE_FN(spread_msg_size) { long ps = pkt_size; long frames = 0; const long eth_frame_size = 1514; const long spread_header_size = 154; // total headers in Spread packet const long spread_max_pkt_size = 31794; // 21 Ethernet frames if (pkt_size <= spread_header_size) { ps = spread_header_size + 1; gu_warn ("Requested packet size %d is too small, " "minimum possible is %d", pkt_size, ps); return pkt_size - ps; } if (pkt_size > spread_max_pkt_size) { ps = spread_max_pkt_size; gu_warn ("Requested packet size %d is too big, " "using maximum possible: %d", pkt_size, ps); } frames = ps / eth_frame_size; frames += ((frames * eth_frame_size) < ps); // one incomplete frame return (ps - frames * (42 + 32) - 80); } static GCS_BACKEND_OPEN_FN(spread_open) { long err = 0; spread_t* spread = backend->conn; if (!spread) return -EBADFD; if (!channel) { gu_error ("No channel supplied."); return -EINVAL; } spread->channel = strdup (channel); if (!spread->channel) return -ENOMEM; err = SP_join 
(spread->mbox, spread->channel); if (err) { switch (err) /* translate error codes */ { case ILLEGAL_GROUP: err = -EADDRNOTAVAIL; break; case ILLEGAL_SESSION: err = -EADDRNOTAVAIL; break; case CONNECTION_CLOSED: err = -ENETRESET; break; default: err = -ENOTCONN; break; } gu_error ("%s", strerror (-err)); return err; } gu_info ("Joined channel: %s", spread->channel); return err; } #if defined(__linux__) extern char *program_invocation_short_name; #endif GCS_BACKEND_CREATE_FN(gcs_spread_create) { long err = 0; long n = 0; spread_t* spread = NULL; backend->conn = NULL; if (!socket) { gu_error ("No socket supplied."); err = -EINVAL; goto out0; } if ((err = spread_create (&spread, socket))) goto out0; do { /* Try to generate unique name */ if (spread_priv_name (spread->priv_name, #if defined(__sun__) getexecname (), #elif defined(__APPLE__) || defined(__FreeBSD__) getprogname (), #elif defined(__linux__) program_invocation_short_name, #else "unknown", #endif n++)) { /* Failed to generate a name in the form * program_name_number. Let spread do it for us */ gu_free (spread->priv_name); spread->priv_name = NULL; } err = SP_connect (spread->socket, spread->priv_name, 0, 1, &spread->mbox, spread->priv_group); } while (REJECT_NOT_UNIQUE == err); if (err < 0) { gu_debug ("Spread connect error"); switch (err) /* translate error codes */ { case ILLEGAL_SPREAD: err = -ESOCKTNOSUPPORT; break; case COULD_NOT_CONNECT: err = -ENETUNREACH; break; case CONNECTION_CLOSED: err = -ENETRESET; break; case REJECT_ILLEGAL_NAME: err = -EADDRNOTAVAIL; gu_error ("Spread returned REJECT_ILLEGAL_NAME"); break; case REJECT_NO_NAME: err = -EDESTADDRREQ; gu_error ("Spread returned REJECT_NO_NAME." 
"Spread protocol error"); break; case REJECT_VERSION: default: gu_error ("Generic Spread error code: %d", err); err = -EPROTONOSUPPORT; break; } goto out1; } else { assert (err == ACCEPT_SESSION); err = 0; } gu_debug ("Connected to Spread: priv_name = %s, priv_group = %s", spread->priv_name, spread->priv_group); backend->conn = spread; backend->open = spread_open; backend->close = spread_close; backend->send = spread_send; backend->recv = spread_recv; backend->name = spread_name; backend->msg_size = spread_msg_size; backend->destroy = spread_destroy; return err; out1: spread_destroy (backend); out0: gu_error ("Creating Spread backend failed: %s (%d)", strerror (-err), err); return err; } galera-4-26.4.25/gcs/src/gcs_dummy.cpp000644 000164 177776 00000023041 15107057155 020552 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Dummy backend implementation * */ #include #include #include #include #include #include #include #define GCS_COMP_MSG_ACCESS // for gcs_comp_memb_t #ifndef GCS_DUMMY_TESTING #define GCS_DUMMY_TESTING #endif #include "gcs_dummy.hpp" typedef struct dummy_msg { gcs_msg_type_t type; ssize_t len; long sender_idx; uint8_t buf[]; } dummy_msg_t; static inline dummy_msg_t* dummy_msg_create (gcs_msg_type_t const type, size_t const len, long const sender, const void* const buf) { dummy_msg_t *msg = NULL; if ((msg = static_cast(gu_malloc (sizeof(dummy_msg_t) + len)))) { memcpy (msg->buf, buf, len); msg->len = len; msg->type = type; msg->sender_idx = sender; } return msg; } static inline long dummy_msg_destroy (dummy_msg_t *msg) { if (msg) { gu_free (msg); } return 0; } typedef enum dummy_state { DUMMY_DESTROYED, DUMMY_CLOSED, DUMMY_NON_PRIM, DUMMY_TRANS, DUMMY_PRIM, } dummy_state_t; typedef struct gcs_backend_conn { gu_fifo_t* gc_q; /* "serializator" */ volatile dummy_state_t state; gcs_seqno_t msg_id; const size_t max_pkt_size; const size_t hdr_size; const size_t max_send_size; long my_idx; long memb_num; 
gcs_comp_memb_t* memb; } dummy_t; static GCS_BACKEND_DESTROY_FN(dummy_destroy) { dummy_t* dummy = backend->conn; if (!dummy || dummy->state != DUMMY_CLOSED) return -EBADFD; // gu_debug ("Deallocating message queue (serializer)"); gu_fifo_destroy (dummy->gc_q); if (dummy->memb) gu_free (dummy->memb); gu_free (dummy); backend->conn = NULL; return 0; } static GCS_BACKEND_SEND_FN(dummy_send) { int err = 0; dummy_t* dummy = backend->conn; if (gu_unlikely(NULL == dummy)) return -EBADFD; if (gu_likely(DUMMY_PRIM == dummy->state)) { err = gcs_dummy_inject_msg (backend, buf, len, msg_type, backend->conn->my_idx); } else { static long send_error[DUMMY_PRIM] = { -EBADFD, -EBADFD, -ENOTCONN, -EAGAIN }; err = send_error[dummy->state]; } return err; } static GCS_BACKEND_RECV_FN(dummy_recv) { long ret = 0; dummy_t* conn = backend->conn; msg->sender_idx = GCS_SENDER_NONE; msg->type = GCS_MSG_ERROR; assert (conn); /* skip it if we already have popped a message from the queue * in the previous call */ if (gu_likely(DUMMY_CLOSED <= conn->state)) { int err; dummy_msg_t** ptr = static_cast( gu_fifo_get_head (conn->gc_q, &err)); if (gu_likely(ptr != NULL)) { dummy_msg_t* dmsg = *ptr; assert (NULL != dmsg); msg->type = dmsg->type; msg->sender_idx = dmsg->sender_idx; ret = dmsg->len; msg->size = ret; if (gu_likely(dmsg->len <= msg->buf_len)) { gu_fifo_pop_head (conn->gc_q); memcpy (msg->buf, dmsg->buf, dmsg->len); dummy_msg_destroy (dmsg); } else { // supplied recv buffer too short, leave the message in queue memcpy (msg->buf, dmsg->buf, msg->buf_len); gu_fifo_release (conn->gc_q); } } else { ret = -EBADFD; // closing gu_debug ("Returning %ld: %s", ret, strerror(-ret)); } } else { ret = -EBADFD; } return ret; } static GCS_BACKEND_NAME_FN(dummy_name) { return "built-in dummy backend"; } static GCS_BACKEND_MSG_SIZE_FN(dummy_msg_size) { const long max_pkt_size = backend->conn->max_pkt_size; if (pkt_size > max_pkt_size) { gu_warn ("Requested packet size: %ld, maximum possible packet size: 
%ld", pkt_size, max_pkt_size); return (max_pkt_size - backend->conn->hdr_size); } return (pkt_size - backend->conn->hdr_size); } static GCS_BACKEND_OPEN_FN(dummy_open) { long ret = -ENOMEM; dummy_t* dummy = backend->conn; gcs_comp_msg_t* comp; if (!dummy) { gu_debug ("Backend not initialized"); return -EBADFD; } if (!bootstrap) { dummy->state = DUMMY_TRANS; return 0; } comp = gcs_comp_msg_new (true, false, 0, 1, 0); if (comp) { ret = gcs_comp_msg_add (comp, "11111111-2222-3333-4444-555555555555",0); assert (0 == ret); // we have only one member, index = 0 dummy->state = DUMMY_TRANS; // required by gcs_dummy_set_component() ret = gcs_dummy_set_component (backend, comp); // install new component if (ret >= 0) { // queue the message ret = gcs_comp_msg_size(comp); ret = gcs_dummy_inject_msg (backend, comp, ret, GCS_MSG_COMPONENT, GCS_SENDER_NONE); if (ret > 0) ret = 0; } gcs_comp_msg_delete (comp); } gu_debug ("Opened backend connection: %ld (%s)", ret, strerror(-ret)); return ret; } static GCS_BACKEND_CLOSE_FN(dummy_close) { long ret = -ENOMEM; dummy_t* dummy = backend->conn; gcs_comp_msg_t* comp; if (!dummy) return -EBADFD; comp = gcs_comp_msg_leave (0); if (comp) { ret = gcs_comp_msg_size(comp); ret = gcs_dummy_inject_msg (backend, comp, ret, GCS_MSG_COMPONENT, GCS_SENDER_NONE); // Here's a race condition - some other thread can send something // after leave message. But caller should guarantee serial access. 
gu_fifo_close (dummy->gc_q); if (ret > 0) ret = 0; gcs_comp_msg_delete (comp); } dummy->state = DUMMY_CLOSED; return ret; } static GCS_BACKEND_PARAM_SET_FN(dummy_param_set) { return 1; } static GCS_BACKEND_PARAM_GET_FN(dummy_param_get) { return NULL; } GCS_BACKEND_STATUS_GET_FN(dummy_status_get) { } GCS_BACKEND_CREATE_FN(gcs_dummy_create) { long ret = -ENOMEM; dummy_t* dummy = NULL; if (!(dummy = GU_CALLOC(1, dummy_t))) goto out0; dummy->state = DUMMY_CLOSED; *(size_t*)(&dummy->max_pkt_size) = (size_t) sysconf (_SC_PAGESIZE); *(size_t*)(&dummy->hdr_size) = sizeof(dummy_msg_t); *(size_t*)(&dummy->max_send_size) = dummy->max_pkt_size - dummy->hdr_size; if (!(dummy->gc_q = gu_fifo_create (1 << 16, sizeof(void*)))) goto out1; backend->conn = NULL; backend->open = dummy_open; backend->close = dummy_close; backend->destroy = dummy_destroy; backend->send = dummy_send; backend->recv = dummy_recv; backend->name = dummy_name; backend->msg_size = dummy_msg_size; backend->param_set = dummy_param_set; backend->param_get = dummy_param_get; backend->status_get = dummy_status_get; backend->conn = dummy; // set data return 0; out1: gu_free (dummy); out0: backend->conn = NULL; return ret; } GCS_BACKEND_REGISTER_FN(gcs_dummy_register) { return false; } /*! Injects a message in the message queue to produce a desired msg sequence. */ long gcs_dummy_inject_msg (gcs_backend_t* backend, const void* buf, size_t buf_len, gcs_msg_type_t type, long sender_idx) { long ret; size_t send_size = buf_len < backend->conn->max_send_size ? buf_len : backend->conn->max_send_size; dummy_msg_t* msg = dummy_msg_create (type, send_size, sender_idx, buf); if (msg) { dummy_msg_t** ptr = static_cast( gu_fifo_get_tail (backend->conn->gc_q)); if (gu_likely(ptr != NULL)) { *ptr = msg; gu_fifo_push_tail (backend->conn->gc_q); ret = send_size; } else { dummy_msg_destroy (msg); ret = -EBADFD; // closed } } else { ret = -ENOMEM; } return ret; } /*! Sets the new component view. 
* The same component message should be injected in the queue separately * (see gcs_dummy_inject_msg()) in order to model different race conditions */ long gcs_dummy_set_component (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { dummy_t* dummy = backend->conn; long new_num = gcs_comp_msg_num (comp); long i; assert (dummy->state > DUMMY_CLOSED); if (dummy->memb_num != new_num) { void* tmp = gu_realloc (dummy->memb, new_num * sizeof(gcs_comp_memb_t)); if (NULL == tmp) return -ENOMEM; dummy->memb = static_cast(tmp); dummy->memb_num = new_num; } for (i = 0; i < dummy->memb_num; i++) { strcpy ((char*)&dummy->memb[i], gcs_comp_msg_member(comp, i)->id); } dummy->my_idx = gcs_comp_msg_self(comp); dummy->state = gcs_comp_msg_primary(comp) ? DUMMY_PRIM : DUMMY_NON_PRIM; gu_debug ("Setting state to %s", DUMMY_PRIM == dummy->state ? "DUMMY_PRIM" : "DUMMY_NON_PRIM"); return 0; } /*! Is needed to set transitional state */ long gcs_dummy_set_transitional (gcs_backend_t* backend) { backend->conn->state = DUMMY_TRANS; return 0; } galera-4-26.4.25/gcs/src/gcs_comp_msg.hpp000644 000164 177776 00000006161 15107057155 021234 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to component messages * */ #ifndef _gcs_component_h_ #define _gcs_component_h_ #include #include // should accommodate human readable UUID (without trailing \0) #define GCS_COMP_MEMB_ID_MAX_LEN GU_UUID_STR_LEN /*! 
members of the same segment are physically closer than the others */ typedef uint8_t gcs_segment_t; typedef struct gcs_comp_memb { char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; /// ID assigned by the backend gcs_segment_t segment; } gcs_comp_memb_t; #ifdef GCS_COMP_MSG_ACCESS typedef struct gcs_comp_msg { int my_idx; /// this node's index in membership int memb_num; /// number of members in configuration bool primary; /// 1 if we have a quorum, 0 if not bool bootstrap; /// 1 if primary was bootstrapped int error; /// error code gcs_comp_memb_t memb[1]; /// member array } gcs_comp_msg_t; #else typedef struct gcs_comp_msg gcs_comp_msg_t; #endif /*! Allocates new component message * @param prim whether component is primary or not * @param bootstrap whether prim was bootstrapped * @param my_idx this node index in the membership * @param memb_num number of members in component * @param error error code * @return * allocated message buffer */ extern gcs_comp_msg_t* gcs_comp_msg_new (bool prim, bool bootstrap, int my_idx, int memb_num, int error); /*! Standard empty "leave" component message (to be returned on shutdown) */ extern gcs_comp_msg_t* gcs_comp_msg_leave (int error); /*! Destroys component message */ extern void gcs_comp_msg_delete (gcs_comp_msg_t* comp); /*! Adds a member to the component message * Returns an index of the member or negative error code: * -1 when membership is full * -ENOTUNIQ when name collides with one that is in membership already * -ENAMETOOLONG wnen memory allocation for new name fails */ extern int gcs_comp_msg_add (gcs_comp_msg_t* comp, const char* id, gcs_segment_t segment); /*! Returns total size of the component message */ extern int gcs_comp_msg_size (const gcs_comp_msg_t* comp); /*! Creates a copy of the component message */ extern gcs_comp_msg_t* gcs_comp_msg_copy (const gcs_comp_msg_t* comp); /*! Returns member ID by index, NULL if none */ extern const gcs_comp_memb_t* gcs_comp_msg_member (const gcs_comp_msg_t* comp, int idx); /*! 
Returns member index by ID, -1 if none */ extern int gcs_comp_msg_idx (const gcs_comp_msg_t* comp, const char* id); /*! Returns primary status of the component */ extern bool gcs_comp_msg_primary (const gcs_comp_msg_t* comp); /*! Returns bootstrap flag */ extern bool gcs_comp_msg_bootstrap(const gcs_comp_msg_t* comp); /*! Returns our own idx */ extern int gcs_comp_msg_self (const gcs_comp_msg_t* comp); /*! Returns number of members in the component */ extern int gcs_comp_msg_num (const gcs_comp_msg_t* comp); /*! Returns error code of the component message */ extern int gcs_comp_msg_error(const gcs_comp_msg_t* comp); #endif /* _gcs_component_h_ */ galera-4-26.4.25/gcs/src/gcs_node.hpp000644 000164 177776 00000011744 15107057155 020360 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ /*! * Node context */ #ifndef _gcs_node_h_ #define _gcs_node_h_ #include "gcs.hpp" #include "gcs_defrag.hpp" #include "gcs_comp_msg.hpp" #include "gcs_state_msg.hpp" #include #define NODE_NO_ID "undefined" #define NODE_NO_NAME "unspecified" #define NODE_NO_ADDR "unspecified" #define GCS_NO_VOTE_SEQNO GCS_SEQNO_ILL struct gcs_node { gcs_defrag_t app; // defragmenter for application actions gcs_defrag_t oob; // defragmenter for out-of-band service acts. 
// globally unique id from a component message char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; // to track snapshot status char joiner[GCS_COMP_MEMB_ID_MAX_LEN + 1]; char donor [GCS_COMP_MEMB_ID_MAX_LEN + 1]; const char* name; // human-given name const char* inc_addr; // incoming address - for load balancer const gcs_state_msg_t* state_msg;// state message gcs_seqno_t last_applied; // last applied action on that node gcs_seqno_t vote_seqno; int64_t vote_res; int gcs_proto_ver;// supported protocol versions int repl_proto_ver; int appl_proto_ver; int desync_count; gcs_node_state_t status; // node status gcs_segment_t segment; bool count_last_applied; // should it be counted bool bootstrap; // is part of prim comp bootstrap process bool stateless; }; typedef struct gcs_node gcs_node_t; /*! Initialize node context */ extern void gcs_node_init (gcs_node_t* node, gcache_t* gcache, const char* id, const char* name, ///< can be null const char* inc_addr, ///< can be null int gcs_proto_ver, int repl_proto_ver, int appl_proto_ver, gcs_segment_t segment, bool stateless); /*! Move data from one node object to another */ extern void gcs_node_move (gcs_node_t* dest, gcs_node_t* src); /*! Deallocate resources associated with the node object */ extern void gcs_node_free (gcs_node_t* node); /*! Reset node's receive buffers */ extern void gcs_node_reset (gcs_node_t* node); /*! Mark node's buffers as reset, but don't do it actually (local node only) */ extern void gcs_node_reset_local (gcs_node_t* node); /*! * Handles action message. 
Is called often - therefore, inlined * * @return */ static inline ssize_t gcs_node_handle_act_frag (gcs_node_t* node, const gcs_act_frag_t* frg, struct gcs_act* act, bool local) { ssize_t ret; if (gu_likely(GCS_ACT_SERVICE != frg->act_type)) { ret = gcs_defrag_handle_frag (&node->app, frg, act, local); } else if (GCS_ACT_SERVICE == frg->act_type) { ret = gcs_defrag_handle_frag (&node->oob, frg, act, local); } else { gu_warn ("Unrecognised action type: %d", frg->act_type); assert(0); ret = -EPROTO; } return ret; } static inline void gcs_node_set_last_applied (gcs_node_t* node, gcs_seqno_t seqno) { assert(seqno >= 0); if (gu_unlikely(seqno <= node->last_applied)) { if (node->count_last_applied) { gu_warn("Received bogus LAST message: %lld from node %s, " "expected > %lld. Ignoring.", (long long)seqno, node->id, (long long)node->last_applied); } } else { node->last_applied = seqno; } } extern void gcs_node_set_vote (gcs_node_t* node, gcs_seqno_t seqno, int64_t vote, int gcs_ptoto); static inline gcs_seqno_t gcs_node_get_last_applied (gcs_node_t* node) { return node->last_applied; } /*! Record state message from the node */ extern void gcs_node_record_state (gcs_node_t* node, gcs_state_msg_t* state); /*! 
Update node status according to quorum decisions */ extern void gcs_node_update_status (gcs_node_t* node, const gcs_state_quorum_t* quorum); static inline gcs_node_state_t gcs_node_get_status (const gcs_node_t* node) { return node->status; } static inline gcs_seqno_t gcs_node_cached (const gcs_node_t* node) { /* node->state_msg check is needed in NON-PRIM situations, where no * state message exchange happens */ if (node->state_msg) return gcs_state_msg_cached(node->state_msg); else return GCS_SEQNO_ILL; } static inline uint8_t gcs_node_flags (const gcs_node_t* node) { return gcs_state_msg_flags(node->state_msg); } static inline bool gcs_node_is_joined (const gcs_node_state_t st) { return (st >= GCS_NODE_STATE_DONOR); } extern void gcs_node_print(std::ostream& os, const gcs_node_t& node); static inline std::ostream& operator << (std::ostream& os, const gcs_node_t& node) { gcs_node_print(os, node); return os; } #endif /* _gcs_node_h_ */ galera-4-26.4.25/gcs/src/gcs_fifo_lite.cpp000644 000164 177776 00000010366 15107057155 021365 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ * * FIFO "class" customized for particular purpose * (here I decided to sacrifice generality for efficiency). * Implements simple fixed size "mallocless" FIFO. * Except gcs_fifo_create() there are two types of fifo * access methods - protected and unprotected. Unprotected * methods assume that calling routines implement their own * protection, and thus are simplified for speed. */ #include #include "gcs_fifo_lite.hpp" /* Creates FIFO object. Since it practically consists of array of (void*), * the length can be chosen arbitrarily high - to minimize the risk * of overflow situation. */ gcs_fifo_lite_t* gcs_fifo_lite_create (size_t length, size_t item_size) { gcs_fifo_lite_t* ret = NULL; uint64_t l = 1; /* check limits */ if (length < 1 || item_size < 1) return NULL; /* Find real length. 
It must be power of 2*/ while (l < length) l = l << 1; if (item_size > (uint64_t)(GU_LONG_MAX/l)) { gu_error ("Resulting FIFO size %llu * %lu exceeds signed limit: %lld", (unsigned long long)(item_size), l, (signed long long)GU_LONG_MAX); return NULL; } ret = GU_CALLOC (1, gcs_fifo_lite_t); if (ret) { ret->length = l; ret->item_size = item_size; ret->mask = ret->length - 1; ret->closed = true; assert(item_size < (uint64_t)(GU_LONG_MAX/ ret->length)); ret->queue = gu_malloc (ret->length * item_size); if (ret->queue) { gu_mutex_init (&ret->lock, NULL); gu_cond_init (&ret->put_cond, NULL); gu_cond_init (&ret->get_cond, NULL); /* everything else must be initialized to 0 by calloc */ } else { gu_free (ret); ret = NULL; } } return ret; } void gcs_fifo_lite_close (gcs_fifo_lite_t* fifo) { GCS_FIFO_LITE_LOCK; if (fifo->closed) { gu_error ("Trying to close a closed FIFO"); assert(0); } else { fifo->closed = true; // wake whoever is waiting fifo->put_wait = 0; gu_cond_broadcast (&fifo->put_cond); fifo->get_wait = 0; gu_cond_broadcast (&fifo->get_cond); } gu_mutex_unlock (&fifo->lock); } void gcs_fifo_lite_open (gcs_fifo_lite_t* fifo) { GCS_FIFO_LITE_LOCK; if (!fifo->closed) { gu_error ("Trying to open an open FIFO."); assert(0); } else { fifo->closed = false; } gu_mutex_unlock(&fifo->lock); } long gcs_fifo_lite_destroy (gcs_fifo_lite_t* f) { if (f) { if (gu_mutex_lock (&f->lock)) { abort(); } if (f->destroyed) { gu_mutex_unlock (&f->lock); return -EALREADY; } f->closed = true; f->destroyed = true; /* get rid of "put" threads waiting for lock or signal */ while (gu_cond_destroy (&f->put_cond)) { if (f->put_wait <= 0) { gu_fatal ("Can't destroy condition while nobody's waiting"); abort(); } f->put_wait = 0; gu_cond_broadcast (&f->put_cond); } while (f->used) { /* there are some items in FIFO - and that means * no gcs_fifo_lite_safe_get() is waiting on condition */ gu_mutex_unlock (&f->lock); /* let them get remaining items from FIFO, * we don't know how to deallocate them 
ourselves. * unfortunately this may take some time */ usleep (10000); /* sleep a bit to avoid busy loop */ gu_mutex_lock (&f->lock); } f->length = 0; /* now all we have - "get" threads waiting for lock or signal */ while (gu_cond_destroy (&f->get_cond)) { if (f->get_wait <= 0) { gu_fatal ("Can't destroy condition while nobody's waiting"); abort(); } f->get_wait = 0; gu_cond_broadcast (&f->get_cond); } /* at this point there are only functions waiting for lock */ gu_mutex_unlock (&f->lock); while (gu_mutex_destroy (&f->lock)) { /* this should be fast provided safe get and safe put are * wtitten correctly. They should immediately freak out. */ gu_mutex_lock (&f->lock); gu_mutex_unlock (&f->lock); } /* now nobody's waiting for anything */ gu_free (f->queue); gu_free (f); return 0; } return -EINVAL; } galera-4-26.4.25/gcs/src/gcs_code_msg.hpp000644 000164 177776 00000003061 15107057155 021204 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015 Codership Oy */ #ifndef _gcs_code_msg_hpp_ #define _gcs_code_msg_hpp_ #include #include #include #include namespace gcs { namespace core { /* helper class to hold code message in serialized form */ class CodeMsg { union Msg { gu_byte_t buf_[32]; struct { gu_uuid_t uuid_; int64_t seqno_; int64_t code_; } s_; } msg_; // ensure that union is properly packed GU_COMPILE_ASSERT(sizeof(Msg) == sizeof(Msg().buf_), msg_not_packed); public: CodeMsg(const gu::GTID& gtid, int64_t code) { msg_.s_.uuid_ = gtid.uuid()(); msg_.s_.seqno_ = gu::htog(gtid.seqno()); msg_.s_.code_ = gu::htog(code); } void unserialize(gu::GTID& gtid, int64_t& code) const { gtid.set(msg_.s_.uuid_); gtid.set(gu::gtoh(msg_.s_.seqno_)); code = gu::gtoh(msg_.s_.code_); } const gu_uuid_t& uuid() const { return msg_.s_.uuid_; } int64_t seqno() const { return gu::gtoh(msg_.s_.seqno_); } int64_t code() const { return gu::gtoh(msg_.s_.code_); } const void* operator()() const { return &msg_; } static int serial_size() { return sizeof(Msg); } void print(std::ostream& os) 
const; }; /* class CodeMsg */ static inline std::ostream& operator << (std::ostream& os, const CodeMsg& msg) { msg.print(os); return os; } } /* namespace core */ } /* namespace gcs */ #endif /* _gcs_code_msg_h_ */ galera-4-26.4.25/gcs/src/gcs_fc.cpp000644 000164 177776 00000015164 15107057155 020016 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010 Codership Oy * * $Id$ */ /*! @file This unit contains Flow Control parts deemed worthy to be * taken out of gcs.c */ #include "gcs_fc.hpp" #include #include double const gcs_fc_hard_limit_fix = 0.9; //! allow for some overhead static double const min_sleep = 0.001; //! minimum sleep period (s) /*! Initializes operational constants before opening connection to group * @return -EINVAL if wrong values are submitted */ int gcs_fc_init (gcs_fc_t* fc, ssize_t hard_limit, // slave queue hard limit double soft_limit, // soft limit as a fraction of hard limit double max_throttle) { assert (fc); if (hard_limit < 0) { gu_error ("Bad value for slave queue hard limit: %zd (should be > 0)", hard_limit); return -EINVAL; } if (soft_limit < 0.0 || soft_limit >= 1.0) { gu_error ("Bad value for slave queue soft limit: %f " "(should belong to [0.0,1.0) )", soft_limit); return -EINVAL; } if (max_throttle < 0.0 || max_throttle >= 1.0) { gu_error ("Bad value for max throttle: %f " "(should belong to [0.0,1.0) )", max_throttle); return -EINVAL; } memset (fc, 0, sizeof(*fc)); fc->hard_limit = hard_limit; fc->soft_limit = fc->hard_limit * soft_limit; fc->max_throttle = max_throttle; return 0; } /*! 
Reinitializes object at the beginning of state transfer */ void gcs_fc_reset (gcs_fc_t* const fc, ssize_t const queue_size) { assert (fc != NULL); assert (queue_size >= 0); fc->init_size = queue_size; fc->size = fc->init_size; fc->start = gu_time_monotonic(); fc->last_sleep = 0; fc->act_count = 0; fc->max_rate = -1.0; fc->scale = 0.0; fc->offset = 0.0; fc->sleep_count= 0; fc->sleeps = 0.0; } /* * The idea here is that there is no flow control up until slave queue size * reaches soft limit. * After that flow control gradually slows down replication rate by emitting FC * events in order to buy more time for state transfer. * Replication rate goes linearly from normal rate at soft limit to max_throttle * fraction at hard limit, at which point -ENOMEM is returned as replication * becomes prohibitively slow. * * replication * speed * ^ * |--------. <- normal replication rate * | .\ * | . \ * | . \ * | . \ speed = fc->size * fc->scale + fc->offset * | . \ * | . \ * | . \ | * | . \ | * | . \| * | . + <- throttle limit * | . | * | . | * +--------+---------+----> slave queue size * soft hard * limit limit */ /*! Processes a new action added to a slave queue. * @return length of sleep in nanoseconds or negative error code * or GU_TIME_ETERNITY for complete stop */ long long gcs_fc_process (gcs_fc_t* fc, ssize_t act_size) { fc->size += act_size; fc->act_count++; if (fc->size <= fc->soft_limit) { /* normal operation */ if (gu_unlikely(fc->debug > 0 && !(fc->act_count % fc->debug))) { gu_info ("FC: queue size: %zdb (%4.1f%% of soft limit)", fc->size, ((double)fc->size)/fc->soft_limit*100.0); } return 0; } else if (fc->size >= fc->hard_limit) { if (0.0 == fc->max_throttle) { /* we can accept total service outage */ return GU_TIME_ETERNITY; } else { gu_error ("Recv queue hard limit exceeded. 
Can't continue."); return -ENOMEM; } } // else if (!(fc->act_count & 7)) { // do this for every 8th action else { long long end = gu_time_monotonic(); double interval = ((end - fc->start) * 1.0e-9); if (gu_unlikely (0 == fc->last_sleep)) { /* just tripped the soft limit, preparing constants for throttle */ fc->max_rate = (double)(fc->size - fc->init_size) / interval; double s = (1.0 - fc->max_throttle)/(fc->soft_limit-fc->hard_limit); assert (s < 0.0); fc->scale = s * fc->max_rate; fc->offset = (1.0 - s*fc->soft_limit) * fc->max_rate; // calculate time interval from the soft limit interval = interval * (double)(fc->size - fc->soft_limit) / (fc->size - fc->init_size); assert (interval >= 0.0); // Move reference point to soft limit fc->last_sleep = fc->soft_limit; fc->start = end - interval * 1000000000; gu_warn("Soft recv queue limit exceeded, starting replication " "throttle. Measured avg. rate: %f bytes/sec; " "Throttle parameters: scale=%f, offset=%f", fc->max_rate, fc->scale, fc->offset); } /* throttling operation */ double desired_rate = fc->size * fc->scale + fc->offset; // linear decay //double desired_rate = fc->max_rate * fc->max_throttle; // square wave assert (desired_rate <= fc->max_rate); double sleep = (double)(fc->size - fc->last_sleep) / desired_rate - interval; if (gu_unlikely(fc->debug > 0 && !(fc->act_count % fc->debug))) { gu_info ("FC: queue size: %zdb, length: %zd, " "measured rate: %fb/s, desired rate: %fb/s, " "interval: %5.3fs, sleep: %5.4fs. 
" "Sleeps initiated: %zd, for a total of %6.3fs", fc->size, fc->act_count, ((double)(fc->size - fc->last_sleep))/interval, desired_rate, interval, sleep, fc->sleep_count, fc->sleeps); fc->sleep_count = 0; fc->sleeps = 0.0; } if (gu_likely(sleep < min_sleep)) { #if 0 gu_info ("Skipping sleep: desired_rate = %f, sleep = %f (%f), " "interval = %f, fc->scale = %f, fc->offset = %f, " "fc->size = %zd", desired_rate, sleep, min_sleep, interval, fc->scale, fc->offset, fc->size); #endif return 0; } fc->last_sleep = fc->size; fc->start = end; fc->sleep_count++; fc->sleeps += sleep; return (1000000000LL * sleep); } return 0; } void gcs_fc_debug (gcs_fc_t* fc, long debug_level) { fc->debug = debug_level; } galera-4-26.4.25/gcs/src/gcs.cpp000644 000164 177776 00000236262 15107057155 017352 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ /* * Top-level application interface implementation. */ #include "gcs_priv.hpp" #include "gcs_params.hpp" #include "gcs_fc.hpp" #include "gcs_seqno.hpp" #include "gcs_core.hpp" #include "gcs_fifo_lite.hpp" #include "gcs_sm.hpp" #include "gcs_gcache.hpp" #include "gcs_error.hpp" #include #include #include #include #include #include #include #include #include #include #include const char* gcs_node_state_to_str (gcs_node_state_t state) { static const char* str[GCS_NODE_STATE_MAX + 1] = { "NON-PRIMARY", "PRIMARY", "JOINER", "DONOR", "JOINED", "SYNCED", "UNKNOWN" }; if (state < GCS_NODE_STATE_MAX) return str[state]; return str[GCS_NODE_STATE_MAX]; } const char* gcs_act_type_to_str (gcs_act_type_t type) { static const char* str[GCS_ACT_UNKNOWN + 1] = { "WRITESET", "COMMIT_CUT", "STATE_REQUEST", "CONFIGURATION", "JOIN", "SYNC", "FLOW", "VOTE", "SERVICE", "ERROR", "UNKNOWN" }; if (type < GCS_ACT_UNKNOWN) return str[type]; return str[GCS_ACT_UNKNOWN]; } std::ostream& operator <<(std::ostream& os, const gcs_action& act) { os << gcs_act_type_to_str(act.type) << ", g: " << act.seqno_g << ", l: " << act.seqno_l << ", 
ptr: " << act.buf << ", size: " << act.size; return os; } static const long GCS_MAX_REPL_THREADS = 16384; typedef enum { GCS_CONN_SYNCED, // caught up with the rest of the group GCS_CONN_JOINED, // state transfer complete GCS_CONN_DONOR, // in state transfer, donor GCS_CONN_JOINER, // in state transfer, joiner GCS_CONN_PRIMARY, // in primary conf, needs state transfer GCS_CONN_OPEN, // just connected to group, non-primary GCS_CONN_CLOSED, GCS_CONN_DESTROYED, GCS_CONN_ERROR, GCS_CONN_STATE_MAX } gcs_conn_state_t; #define GCS_CLOSED_ERROR -EBADFD; // file descriptor in bad state static const char* gcs_conn_state_str[GCS_CONN_STATE_MAX] = { "SYNCED", "JOINED", "DONOR/DESYNCED", "JOINER", "PRIMARY", "OPEN", "CLOSED", "DESTROYED", "ERROR" }; static bool const GCS_FC_STOP = true; static bool const GCS_FC_CONT = false; /** Flow control message */ struct gcs_fc_event { uint32_t conf_id; // least significant part of configuraiton seqno uint32_t stop; // boolean value } __attribute__((__packed__)); struct gcs_conn { gcs_conn(gu::Config& conf, gcache_t* gcache, gu::Progress::Callback* progress_cb, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver); ~gcs_conn(); gu::UUID group_uuid; char* my_name; char* channel; char* socket; int my_idx; int memb_num; gcs_conn_state_t state; gu_config_t* config; struct gcs_params params; gcache_t* gcache; gcs_sm_t* sm; gcs_seqno_t local_act_id; /* local seqno of the action */ gcs_seqno_t global_seqno; /* A queue for threads waiting for replicated actions */ gcs_fifo_lite_t* repl_q; gu_thread_t send_thread; /* A queue for threads waiting for received actions */ gu_fifo_t* recv_q; ssize_t recv_q_size; gu_thread_t recv_thread; /* Message receiving timeout - absolute date in nanoseconds */ long long timeout; /* Flow Control */ gu_mutex_t fc_lock; gcs_fc_t stfc; // state transfer FC object int stop_sent_; // how many STOPs - CONTs were sent int stop_sent() { #ifdef GU_DEBUG_MUTEX assert(gu_mutex_owned(&fc_lock)); 
#endif return stop_sent_; } void stop_sent_inc(int val) { #ifdef GU_DEBUG_MUTEX assert(gu_mutex_owned(&fc_lock)); #endif stop_sent_ += val; assert(stop_sent_ > 0); } void stop_sent_dec(int val) { #ifdef GU_DEBUG_MUTEX assert(gu_mutex_owned(&fc_lock)); #endif assert(stop_sent_ >= val); assert(stop_sent_ > 0); stop_sent_ -= val; } long stop_count; // counts stop requests received long queue_len; // slave queue length long upper_limit; // upper slave queue limit long lower_limit; // lower slave queue limit long fc_offset; // offset for catchup phase gcs_conn_state_t max_fc_state; // maximum state when FC is enabled long stats_fc_stop_sent; // FC stats counters long stats_fc_cont_sent; // long stats_fc_received; // uint32_t conf_id; // configuration ID /* #603, #606 join control */ bool need_to_join; gu::GTID join_gtid; int join_code; /* sync control */ bool sync_sent_; bool sync_sent() const { assert(gu_fifo_locked(recv_q)); return sync_sent_; } void sync_sent(bool const val) { assert(gu_fifo_locked(recv_q)); sync_sent_ = val; } /* gcs_core object */ gcs_core_t* core; // the context that is returned by // the core group communication system /* error vote */ gu_mutex_t vote_lock_; gu_cond_t vote_cond_; gu::GTID vote_gtid_; int64_t vote_res_; bool vote_wait_; int vote_err_; int inner_close_count; // how many times _close has been called. int outer_close_count; // how many times gcs_close has been called. /* JOINED -> SYNCED catch-up progress */ gu::Progress::Callback* progress_cb_; gu::Progress* progress_; }; // Oh C++, where art thou? 
struct gcs_recv_act { struct gcs_act_rcvd rcvd; gcs_seqno_t local_id; }; struct gcs_repl_act { const struct gu_buf* act_in; struct gcs_action* action; gu_mutex_t wait_mutex; gu_cond_t wait_cond; gcs_repl_act(const struct gu_buf* a_act_in, struct gcs_action* a_action) : act_in(a_act_in), action(a_action) { } }; gcs_conn::gcs_conn(gu::Config& conf, gcache_t* cache, gu::Progress::Callback* const progress_cb, const char* const node_name, const char* const inc_addr, int const repl_proto_ver, int const appl_proto_ver) : group_uuid(), my_name(), channel(), socket(), my_idx(), memb_num(), state(GCS_CONN_DESTROYED), config(reinterpret_cast(&conf)), params(conf), gcache(cache), sm(), local_act_id(), global_seqno(), repl_q(), send_thread(), recv_q(), recv_q_size(), recv_thread(), timeout(), fc_lock(), stfc(), stop_sent_(), stop_count(), queue_len(), upper_limit(), lower_limit(), fc_offset(), max_fc_state(), stats_fc_stop_sent(), stats_fc_cont_sent(), stats_fc_received(), conf_id(), need_to_join(), join_gtid(), join_code(), sync_sent_(), core(), vote_lock_(), vote_cond_(), vote_gtid_(), vote_res_(), vote_wait_(), vote_err_(), inner_close_count(), outer_close_count(), progress_cb_(progress_cb), progress_() { auto conn(this); // to minimize diff if (gcs_fc_init (&conn->stfc, conn->params.recv_q_hard_limit, conn->params.recv_q_soft_limit, conn->params.max_throttle)) { gu_error ("FC initialization failed"); goto fc_init_failed; } conn->state = GCS_CONN_DESTROYED; conn->core = gcs_core_create (conf, conn->gcache, node_name, inc_addr, repl_proto_ver, appl_proto_ver); if (!conn->core) { gu_error ("Failed to create core."); goto core_create_failed; } conn->repl_q = gcs_fifo_lite_create (GCS_MAX_REPL_THREADS, sizeof (struct gcs_repl_act*)); if (!conn->repl_q) { gu_error ("Failed to create repl_q."); goto repl_q_failed; } { size_t recv_q_len = gu_avphys_bytes() / sizeof(struct gcs_recv_act) / 4; gu_debug ("Requesting recv queue len: %zu", recv_q_len); conn->recv_q = gu_fifo_create 
(recv_q_len, sizeof(struct gcs_recv_act)); } if (!conn->recv_q) { gu_error ("Failed to create recv_q."); goto recv_q_failed; } conn->sm = gcs_sm_create(1<<16, 1); if (!conn->sm) { gu_error ("Failed to create send monitor"); goto sm_create_failed; } assert(conn->group_uuid == GU_UUID_NIL); conn->state = GCS_CONN_CLOSED; conn->my_idx = -1; conn->local_act_id = GCS_SEQNO_FIRST; conn->global_seqno = 0; conn->fc_offset = 0; conn->timeout = GU_TIME_ETERNITY; conn->max_fc_state = conn->params.sync_donor ? GCS_CONN_DONOR : GCS_CONN_JOINED; gu_mutex_init (&conn->fc_lock, NULL); gu_mutex_init (&conn->vote_lock_, NULL); gu_cond_init (&conn->vote_cond_, NULL); conn->progress_cb_ = progress_cb; conn->progress_ = NULL; return; // success sm_create_failed: gu_fifo_destroy (conn->recv_q); recv_q_failed: gcs_fifo_lite_destroy (conn->repl_q); repl_q_failed: gcs_core_destroy (conn->core); core_create_failed: fc_init_failed: gu_throw_fatal << "Failed to create GCS connection handle."; } /* Creates a group connection handle */ gcs_conn_t* gcs_create (gu::Config& conf, gcache_t* gcache, gu::Progress::Callback* const progress_cb, const char* const node_name, const char* const inc_addr, int const repl_proto_ver, int const appl_proto_ver) { try { return new gcs_conn(conf, gcache, progress_cb, node_name, inc_addr, repl_proto_ver, appl_proto_ver); } catch (...) { return nullptr; } } long gcs_init (gcs_conn_t* conn, const gu::GTID& position) { if (GCS_CONN_CLOSED == conn->state) { return gcs_core_init (conn->core, position); } else { gu_error ("State must be CLOSED"); if (conn->state < GCS_CONN_CLOSED) return -EBUSY; else // DESTROYED return -EBADFD; } } /*! * Checks if we should freak out on send/recv errors. * Sometimes errors are ok, e.g. when attempting to send FC_CONT message * on a closing connection. This can happen because GCS connection state * change propagation from lower layers to upper layers is not atomic. 
* * @param err error code returned by send/recv function * @param warning warning to log if necessary * @return 0 if error can be ignored, original err value if not */ static int gcs_check_error (int err, const char* warning) { switch (err) { case -ENOTCONN: case -ECONNABORTED: if (NULL != warning) { gu_info ("%s: %d (%s)", warning, err, gcs_error_str(-err)); } err = 0; break; default:; } return err; } static inline long gcs_send_fc_event (gcs_conn_t* conn, bool stop) { struct gcs_fc_event fc = { htogl(conn->conf_id), stop }; return gcs_core_send_fc (conn->core, &fc, sizeof(fc)); } /* To be called under slave queue lock. Returns true if FC_STOP must be sent */ static inline bool gcs_fc_stop_begin (gcs_conn_t* conn) { long err = 0; bool ret = (conn->stop_count <= 0 && conn->stop_sent_ <= 0 && conn->queue_len > (conn->upper_limit + conn->fc_offset) && conn->state <= conn->max_fc_state && !(err = gu_mutex_lock (&conn->fc_lock))); if (gu_unlikely(err)) { gu_fatal ("Mutex lock failed: %ld (%s)", err, strerror(err)); abort(); } return ret; } /* Complement to gcs_fc_stop_begin. */ static inline int gcs_fc_stop_end (gcs_conn_t* conn) { #ifdef GU_DEBUG_MUTEX assert(gu_mutex_owned(&conn->fc_lock)); #endif int ret = 0; if (conn->stop_sent() <= 0) { conn->stop_sent_inc(1); gu_mutex_unlock (&conn->fc_lock); ret = gcs_send_fc_event (conn, GCS_FC_STOP); gu_mutex_lock (&conn->fc_lock); if (ret >= 0) { ret = 0; conn->stats_fc_stop_sent++; } else { assert (conn->stop_sent() > 0); /* restore counter */ conn->stop_sent_dec(1); } gu_debug("SENDING FC_STOP (local seqno: %" PRId64 ", fc_offset: %ld): %d", conn->local_act_id, conn->fc_offset, ret); } else { gu_debug ("SKIPPED FC_STOP sending: stop_sent = %d", conn->stop_sent()); } gu_mutex_unlock (&conn->fc_lock); ret = gcs_check_error (ret, "Failed to send FC_STOP signal"); return ret; } /* To be called under slave queue lock. 
Returns true if FC_CONT must be sent */ static inline bool gcs_fc_cont_begin (gcs_conn_t* conn) { long err = 0; bool queue_decreased = (conn->fc_offset > conn->queue_len && (conn->fc_offset = conn->queue_len, true)); bool ret = (conn->stop_sent_ > 0 && (conn->lower_limit >= conn->queue_len || queue_decreased) && conn->state <= conn->max_fc_state && !(err = gu_mutex_lock (&conn->fc_lock))); if (gu_unlikely(err)) { gu_fatal ("Mutex lock failed: %ld (%s)", err, strerror(err)); abort(); } return ret; } /* Complement to gcs_fc_cont_begin() */ static inline int gcs_fc_cont_end (gcs_conn_t* conn) { int ret = 0; assert (GCS_CONN_JOINER >= conn->state); if (conn->stop_sent()) { conn->stop_sent_dec(1); gu_mutex_unlock (&conn->fc_lock); ret = gcs_send_fc_event (conn, GCS_FC_CONT); gu_mutex_lock (&conn->fc_lock); if (gu_likely (ret >= 0)) { ret = 0; conn->stats_fc_cont_sent++; } else { /* restore counter */ conn->stop_sent_inc(1); } gu_debug("SENDING FC_CONT (local seqno: %" PRId64 ", fc_offset: %ld): %d", conn->local_act_id, conn->fc_offset, ret); } else { gu_debug ("SKIPPED FC_CONT sending: stop_sent = %d", conn->stop_sent()); } gu_mutex_unlock (&conn->fc_lock); ret = gcs_check_error (ret, "Failed to send FC_CONT signal"); return ret; } /* To be called under slave queue lock. Returns true if SYNC must be sent */ static inline bool gcs_send_sync_begin (gcs_conn_t* conn) { if (gu_unlikely(GCS_CONN_JOINED == conn->state)) { if (conn->lower_limit >= conn->queue_len && !conn->sync_sent()) { // tripped lower slave queue limit, send SYNC message conn->sync_sent(true); #if 0 gu_info ("Sending SYNC: state = %s, queue_len = %ld, " "lower_limit = %ld, sync_sent = %s", gcs_conn_state_str[conn->state], conn->queue_len, conn->lower_limit, conn->sync_sent() ? 
"true" : "false"); #endif return true; } #if 0 else { gu_info ("Not sending SYNC: state = %s, queue_len = %ld, " "lower_limit = %ld, sync_sent = %s", gcs_conn_state_str[conn->state], conn->queue_len, conn->lower_limit, conn->sync_sent() ? "true" : "false"); } #endif } return false; } static inline long gcs_send_sync_end (gcs_conn_t* conn) { long ret = 0; gu_debug ("SENDING SYNC"); ret = gcs_core_send_sync (conn->core, gu::GTID(conn->group_uuid, conn->global_seqno)); if (gu_likely (ret >= 0)) { ret = 0; } else { gu_fifo_lock(conn->recv_q); conn->sync_sent(false); gu_fifo_release(conn->recv_q); } ret = gcs_check_error (ret, "Failed to send SYNC signal"); return ret; } static inline long gcs_send_sync (gcs_conn_t* conn) { gu_fifo_lock(conn->recv_q); bool const send_sync(gcs_send_sync_begin (conn)); gu_fifo_release(conn->recv_q); if (send_sync) { return gcs_send_sync_end (conn); } else { return 0; } } /*! * State transition functions - just in case we want to add something there. * @todo: need to be reworked, see #231 */ static bool gcs_shift_state (gcs_conn_t* const conn, gcs_conn_state_t const new_state) { static const bool allowed [GCS_CONN_STATE_MAX][GCS_CONN_STATE_MAX] = { // SYNCED JOINED DONOR JOINER PRIM OPEN CLOSED DESTR { false, true, false, false, false, false, false, false }, // SYNCED { false, false, true, true, false, false, false, false }, // JOINED { true, true, true, false, false, false, false, false }, // DONOR { false, false, false, false, true, false, false, false }, // JOINER { true, true, true, true, true, true, false, false }, // PRIMARY { true, true, true, true, true, false, true, false }, // OPEN { true, true, true, true, true, true, false, false }, // CLOSED { false, false, false, false, false, false, true, false } // DESTROYED }; gcs_conn_state_t const old_state = conn->state; if (!allowed[new_state][old_state]) { if (old_state != new_state) { gu_warn("GCS: Shifting %s -> %s is not allowed (TO: %" PRId64 ")", gcs_conn_state_str[old_state], 
gcs_conn_state_str[new_state], conn->global_seqno); } return false; } if (old_state != new_state) { gu_info("Shifting %s -> %s (TO: %" PRId64 ")", gcs_conn_state_str[old_state], gcs_conn_state_str[new_state], conn->global_seqno); conn->state = new_state; } return true; } static void gcs_become_open (gcs_conn_t* conn) { gcs_shift_state (conn, GCS_CONN_OPEN); } static long gcs_set_pkt_size (gcs_conn_t *conn, long pkt_size) { if (conn->state != GCS_CONN_CLOSED) return -EPERM; // #600 workaround long ret = gcs_core_set_pkt_size (conn->core, pkt_size); if (ret >= 0) { conn->params.max_packet_size = ret; gu_config_set_int64 (conn->config, GCS_PARAMS_MAX_PKT_SIZE, conn->params.max_packet_size); } return ret; } static int _release_flow_control (gcs_conn_t* conn) { int err = 0; if (gu_unlikely(err = gu_mutex_lock (&conn->fc_lock))) { gu_fatal ("FC mutex lock failed: %d (%s)", err, strerror(err)); abort(); } if (conn->stop_sent()) { assert (1 == conn->stop_sent()); err = gcs_fc_cont_end (conn); } else { gu_mutex_unlock (&conn->fc_lock); } return err; } static void gcs_become_primary (gcs_conn_t* conn) { assert(conn->join_gtid.seqno() <= 0 || conn->state == GCS_CONN_PRIMARY || conn->state == GCS_CONN_JOINER || conn->state == GCS_CONN_OPEN /* joiner that has received NON_PRIM*/); if (!gcs_shift_state (conn, GCS_CONN_PRIMARY)) { gu_fatal ("Protocol violation, can't continue"); gcs_close (conn); abort(); } conn->join_gtid = gu::GTID(); conn->need_to_join = false; int ret; if ((ret = _release_flow_control (conn))) { gu_fatal ("Failed to release flow control: %d (%s)", ret, gcs_error_str(ret)); gcs_close (conn); abort(); } } static void gcs_become_joiner (gcs_conn_t* conn) { if (!gcs_shift_state (conn, GCS_CONN_JOINER)) { gu_fatal ("Protocol violation, can't continue"); assert (0); abort(); } if (gcs_fc_init (&conn->stfc, conn->params.recv_q_hard_limit, conn->params.recv_q_soft_limit, conn->params.max_throttle)) { gu_fatal ("Becoming JOINER: FC initialization failed, can't 
continue."); abort(); } gcs_fc_reset (&conn->stfc, conn->recv_q_size); gcs_fc_debug (&conn->stfc, conn->params.fc_debug); } // returns 1 if accepts, 0 if rejects, negative error code if fails. static long gcs_become_donor (gcs_conn_t* conn) { if (gcs_shift_state (conn, GCS_CONN_DONOR)) { long err = 0; if (conn->max_fc_state < GCS_CONN_DONOR) { err = _release_flow_control (conn); } return (0 == err ? 1 : err); } gu_warn ("Rejecting State Transfer Request in state '%s'. " "Joiner should be restarted.", gcs_conn_state_str[conn->state]); if (conn->state < GCS_CONN_OPEN){ ssize_t err; gu_warn ("Received State Transfer Request in wrong state %s. " "Rejecting.", gcs_conn_state_str[conn->state]); // reject the request. // error handling currently is way too simplistic err = gcs_join (conn, gu::GTID(conn->group_uuid, conn->global_seqno), -EPROTO); if (err < 0 && !(err == -ENOTCONN || err == -EBADFD)) { gu_fatal ("Failed to send State Transfer Request rejection: " "%zd (%s)", err, (gcs_error_str (-err))); assert (0); return -ENOTRECOVERABLE; // failed to clear donor status, } } return 0; // do not pass to application } static int _release_sst_flow_control (gcs_conn_t* conn) { int ret = 0; do { ret = gu_mutex_lock(&conn->fc_lock); if (!ret) { ret = gcs_fc_cont_end(conn); } else { gu_fatal("failed to lock FC mutex"); abort(); } } while (-EAGAIN == ret); // we need to send CONT here at all costs return ret; } static void start_progress(gcs_conn_t* conn) { gu_fifo_lock(conn->recv_q); { if (conn->progress_) { // Did not reach synced after previously becoming joined. 
            delete conn->progress_;
        }
        /* NOTE(review): template argument list of gu::Progress appears to
         * have been lost in text extraction — verify against upstream.
         * Tracks draining of the receive queue, reporting in units of
         * " events", total pre-set to the current queue length. */
        conn->progress_ = new gu::Progress(
            conn->progress_cb_,
            "Processing event queue:", " events",
            gu_fifo_length(conn->recv_q),
            16);
    }
    gu_fifo_release(conn->recv_q);
}

/* Transition to JOINED: release SST flow control if we were a JOINER,
 * reset join bookkeeping, start queue-drain progress reporting and
 * attempt to send a SYNC message. */
static void
gcs_become_joined (gcs_conn_t* conn)
{
    int ret;

    if (GCS_CONN_JOINER == conn->state) {
        ret = _release_sst_flow_control (conn);
        if (ret < 0) {
            gu_fatal ("Releasing SST flow control failed: %d (%s)",
                      ret, gcs_error_str (-ret));
            abort();
        }
        conn->timeout = GU_TIME_ETERNITY;
    }

    /* See also gcs_handle_act_conf () for a case of cluster bootstrapping */
    if (gcs_shift_state (conn, GCS_CONN_JOINED)) {
        conn->fc_offset    = conn->queue_len;
        conn->join_gtid    = gu::GTID();
        conn->need_to_join = false;
        start_progress(conn);
        gu_debug("Become joined, FC offset %ld", conn->fc_offset);
        /* One of the cases when the node can become SYNCED */
        if ((ret = gcs_send_sync (conn))) {
            gu_warn ("Sending SYNC failed: %d (%s)",
                     ret, gcs_error_str(-ret));
        }
    }
    else {
        assert (0);
    }
}

/* Transition to SYNCED: finish and discard the progress tracker, shift
 * state and clear sync_sent under the receive queue lock, then reset the
 * flow-control offset. */
static void
gcs_become_synced (gcs_conn_t* conn)
{
    gu_fifo_lock(conn->recv_q);
    {
        if (conn->progress_)
        {
            conn->progress_->finish();
            delete conn->progress_;
            conn->progress_ = nullptr;
        }
        gcs_shift_state (conn, GCS_CONN_SYNCED);
        conn->sync_sent(false);
    }
    gu_fifo_release(conn->recv_q);
    gu_debug("Become synced, FC offset %ld", conn->fc_offset);
    conn->fc_offset = 0;
}

/* to be called under protection of both recv_q and fc_lock */
static void
_set_fc_limits (gcs_conn_t* conn)
{
    /* Killing two birds with one stone: flat FC profile for master-slave setups
     * plus #440: giving single node some slack at some math correctness exp.*/
    double const fn (conn->params.fc_single_primary ?
                     1.0 : sqrt(double(conn->memb_num)));

    /* + .5 rounds to nearest when truncating to integral limits. */
    conn->upper_limit = conn->params.fc_base_limit * fn + .5;
    conn->lower_limit = conn->upper_limit * conn->params.fc_resume_factor + .5;

    gu_info ("Flow-control interval: [%ld, %ld]",
             conn->lower_limit, conn->upper_limit);
}

/*!
 Handles flow control events
 * (this is frequent, so leave it inlined) */
static inline void
gcs_handle_flow_control (gcs_conn_t* conn, const struct gcs_fc_event* fc)
{
    /* Events tagged with a stale configuration id are ignored. */
    if (gtohl(fc->conf_id) != (uint32_t)conn->conf_id) {
        // obsolete fc request
        return;
    }

    conn->stop_count += ((fc->stop != 0) << 1) - 1; // +1 if !0, -1 if 0
    conn->stats_fc_received += (fc->stop != 0);

    /* Pause sending on the first outstanding STOP, resume when the last
     * STOP has been matched by a CONT. */
    if (1 == conn->stop_count) {
        gcs_sm_pause (conn->sm);    // first STOP request
    }
    else if (0 == conn->stop_count) {
        gcs_sm_continue (conn->sm); // last CONT request
    }

    return;
}

/* Re-applies the configured maximum packet size to the core.
 * Only permitted while the connection is CLOSED (#600 workaround). */
static void
_reset_pkt_size(gcs_conn_t* conn)
{
    if (conn->state != GCS_CONN_CLOSED) return; // #600 workaround

    long ret;
    if (0 > (ret = gcs_core_set_pkt_size (conn->core,
                                          conn->params.max_packet_size))) {
        gu_warn ("Failed to set packet size: %ld (%s)", ret, strerror(-ret));
    }
}

/* Sends a JOIN message, retrying every 10ms while the core reports -EAGAIN.
 * Returns 0 on success or when a retry in a new primary component is
 * expected (-ENOTCONN), otherwise the negative error code. */
static int
s_join (gcs_conn_t* conn)
{
    int err;

    while (-EAGAIN == (err = gcs_core_send_join (conn->core, conn->join_gtid,
                                                 conn->join_code)))
        usleep (10000);

    if (err < 0)
    {
        switch (err)
        {
        case -ENOTCONN:
            gu_info("Sending JOIN failed: %s. "
                    "Will retry in new primary component.",
                    gcs_error_str(-err));
            return 0;
        default:
            gu_error("Sending JOIN failed: %d (%s).", err,
                     gcs_error_str(-err));
            return err;
        }
    }

    return 0;
}

/*! Handles configuration action */
// TODO: this function does not provide any way for recv_thread to gracefully
// exit in case of self-leave message.
static void gcs_handle_act_conf (gcs_conn_t* conn, gcs_act_rcvd& rcvd) { const gcs_act& act(rcvd.act); gcs_act_cchange const conf(act.buf, act.buf_len); assert(rcvd.id >= 0 || 0 == conf.memb.size()); assert(conf.vote_res <= 0); gu_mutex_lock(&conn->vote_lock_); if (conn->vote_wait_) { assert(0 == conn->vote_err_); if (conn->vote_gtid_.uuid() == conf.uuid) { if (conf.vote_seqno >= conn->vote_gtid_.seqno()) { /* vote end by membership change */ conn->vote_res_ = conf.vote_res; gu_cond_signal(&conn->vote_cond_); } /* else vote for this seqno has not been finalized */ } else { /* vote end by group change */ conn->vote_err_ = -EREMCHG; gu_cond_signal(&conn->vote_cond_); } if (0 == conn->memb_num) { /* vote end by connection close */ conn->vote_err_ = -EBADFD; gu_cond_signal(&conn->vote_cond_); } } conn->group_uuid = conf.uuid; conn->my_idx = rcvd.id; gu_mutex_unlock(&conn->vote_lock_); long ret; gu_fifo_lock(conn->recv_q); { /* reset flow control as membership is most likely changed */ if (!gu_mutex_lock (&conn->fc_lock)) { /* wake up send monitor if it was paused */ if (conn->stop_count > 0) gcs_sm_continue(conn->sm); conn->stop_sent_ = 0; conn->stop_count = 0; conn->conf_id = conf.conf_id; conn->memb_num = conf.memb.size(); _set_fc_limits (conn); gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); abort(); } conn->sync_sent(false); } gu_fifo_release (conn->recv_q); if (conf.conf_id < 0) { if (0 == conn->memb_num) { assert (conn->my_idx < 0); gu_info ("Received SELF-LEAVE. 
Closing connection."); gcs_shift_state (conn, GCS_CONN_CLOSED); } else { gu_info ("Received NON-PRIMARY."); assert (GCS_NODE_STATE_NON_PRIM == conf.memb[conn->my_idx].state_); gcs_become_open (conn); conn->global_seqno = conf.seqno; } return; } assert (conf.conf_id >= 0); /* */ if (conn->memb_num < 1) { assert(0); gu_fatal ("Internal error: PRIMARY configuration with %d nodes", conn->memb_num); abort(); } if (conn->my_idx < 0 || conn->my_idx >= conn->memb_num) { assert(0); gu_fatal ("Internal error: index of this node (%d) is out of bounds: " "[%d, %d]", conn->my_idx, 0, conn->memb_num - 1); abort(); } if (conf.memb[conn->my_idx].state_ < GCS_NODE_STATE_PRIM) { gu_fatal ("Internal error: NON-PRIM node state in PRIM configuraiton"); abort(); } /* */ conn->global_seqno = conf.seqno; /* at this point we have established protocol version, * so can set packet size */ // Ticket #600: commented out as unsafe under load _reset_pkt_size(conn); const gcs_conn_state_t old_state = conn->state; switch (conf.memb[conn->my_idx].state_) { case GCS_NODE_STATE_PRIM: gcs_become_primary(conn); return; /* Below are not real state transitions, rather state recovery, * so bypassing state transition matrix */ case GCS_NODE_STATE_JOINER: conn->state = GCS_CONN_JOINER; break; case GCS_NODE_STATE_DONOR: conn->state = GCS_CONN_DONOR; break; case GCS_NODE_STATE_JOINED: conn->state = GCS_CONN_JOINED; break; case GCS_NODE_STATE_SYNCED: conn->state = GCS_CONN_SYNCED; break; default: gu_fatal ("Internal error: unrecognized node state: %d", conf.memb[conn->my_idx].state_); abort(); } if (old_state != conn->state) { gu_info ("Restored state %s -> %s (%" PRId64 ")", gcs_conn_state_str[old_state], gcs_conn_state_str[conn->state], conn->global_seqno); } switch (conn->state) { case GCS_CONN_JOINED: /* One of the cases when the node can become SYNCED */ if ((ret = gcs_send_sync(conn)) < 0) { gu_warn ("CC: sending SYNC failed: %ld (%s)", ret, gcs_error_str (-ret)); } break; case GCS_CONN_JOINER: case 
GCS_CONN_DONOR: /* #603, #606 - duplicate JOIN msg in case we lost it */ assert (conf.conf_id >= 0); if (conn->need_to_join) s_join (conn); break; default: break; } } static long gcs_handle_act_state_req (gcs_conn_t* conn, struct gcs_act_rcvd& rcvd) { if ((gcs_seqno_t)conn->my_idx == rcvd.id) { int const donor_idx = (int)rcvd.id; // to pacify valgrind gu_debug("Got GCS_ACT_STATE_REQ to %i, my idx: %d", donor_idx, conn->my_idx); // rewrite to pass global seqno for application rcvd.id = conn->global_seqno; return gcs_become_donor (conn); } else { if (rcvd.id >= 0) { gcs_become_joiner (conn); } return 1; // pass to gcs_request_state_transfer() caller. } } /*! Allocates buffer with malloc to pass to the upper layer. */ static long gcs_handle_state_change (gcs_conn_t* conn, const struct gcs_act* act) { gu_debug ("Got '%s' dated %" PRId64, gcs_act_type_to_str (act->type), gcs_seqno_gtoh(*(gcs_seqno_t*)act->buf)); void* buf = malloc (act->buf_len); if (buf) { memcpy (buf, act->buf, act->buf_len); /* Initially act->buf points to internal static recv buffer. * No leak here. 
*/ ((struct gcs_act*)act)->buf = buf; return 1; } else { gu_fatal ("Could not allocate state change action (%zd bytes)", act->buf_len); abort(); return -ENOMEM; } } static int _handle_vote (gcs_conn_t& conn, const struct gcs_act& act) { assert(act.type == GCS_ACT_VOTE); assert(act.buf_len >= ssize_t(2 * sizeof(int64_t))); int64_t seqno, res; gu::unserialize8(act.buf, act.buf_len, 0, seqno); gu::unserialize8(act.buf, act.buf_len, 8, res); if (GCS_VOTE_REQUEST == res) { log_debug << "GCS got vote request for " << seqno; return 1; /* pass request straight to slave q */ } assert(res <= 0); gu_mutex_lock(&conn.vote_lock_); log_debug << "Got vote action: " << seqno << ',' << res; assert(conn.vote_gtid_.seqno() != GCS_SEQNO_ILL); int ret(1); /* by default pass action to slave queue as usual */ if (conn.vote_wait_) { log_debug << "Error voting thread is waiting for: " << conn.vote_gtid_.seqno() << ',' << conn.vote_res_; assert(conn.group_uuid == conn.vote_gtid_.uuid()); if (conn.vote_res_ != 0 || seqno >= conn.vote_gtid_.seqno()) { /* any non-zero vote on past seqno or end vote on future seqno * must wake up voting thread: * - negative vote on past seqno means this node is inconsistent * since it did not detect any problems with it. * - any vote on a future seqno effectively means 0 vote on the * current vote_seqno. It also means that the current voter votes * otherwise (and too late), so is inconsistent. * In any case vote_res mismatch will show that. */ if (seqno > conn.vote_gtid_.seqno()) { conn.vote_res_ = 0; ret = 1; /* still pass to slave q */ } else { conn.vote_res_ = res; ret = 0; /* consumed by voter */ } gu_cond_signal(&conn.vote_cond_); } } else { log_debug << "No error voting thread, returning " << ret; } gu_mutex_unlock(&conn.vote_lock_); if (0 == ret) ::free(const_cast(act.buf)); /* consumed here */ return ret; } /*! * Performs work requred by action in current context. 
* @return negative error code, 0 if action should be discarded, 1 if should be * passed to application. */ static int gcs_handle_actions (gcs_conn_t* conn, struct gcs_act_rcvd& rcvd) { int ret(0); switch (rcvd.act.type) { case GCS_ACT_FLOW: assert (sizeof(struct gcs_fc_event) == rcvd.act.buf_len); gcs_handle_flow_control (conn, (const gcs_fc_event*)rcvd.act.buf); break; case GCS_ACT_CCHANGE: gcs_handle_act_conf (conn, rcvd); ret = 1; break; case GCS_ACT_STATE_REQ: ret = gcs_handle_act_state_req (conn, rcvd); break; case GCS_ACT_JOIN: ret = gcs_handle_state_change (conn, &rcvd.act); if (gcs_seqno_gtoh(*(gcs_seqno_t*)rcvd.act.buf) < 0 && GCS_CONN_JOINER == conn->state) gcs_become_primary (conn); else gcs_become_joined (conn); break; case GCS_ACT_SYNC: if (rcvd.id < 0) { /* sending SYNC failed, need to resend */ gu_fifo_lock(conn->recv_q); conn->sync_sent(false); gu_fifo_release(conn->recv_q); gcs_send_sync(conn); } else { ret = gcs_handle_state_change (conn, &rcvd.act); gcs_become_synced (conn); } break; case GCS_ACT_VOTE: ret = _handle_vote (*conn, rcvd.act); break; default: break; } return ret; } static inline void GCS_FIFO_PUSH_TAIL (gcs_conn_t* conn, ssize_t size) { if (conn->progress_) conn->progress_->update_total(1); conn->recv_q_size += size; gu_fifo_push_tail(conn->recv_q); } /* Returns true if timeout was handled and false otherwise */ static bool _handle_timeout (gcs_conn_t* conn) { bool ret; long long now = gu_time_calendar(); /* TODO: now the only point for timeout is flow control (#412), * later we might need to handle more timers. */ if (conn->timeout <= now) { ret = ((GCS_CONN_JOINER != conn->state) || (_release_sst_flow_control (conn) >= 0)); } else { gu_error ("Unplanned timeout! 
(tout: %lld, now: %lld)", conn->timeout, now); ret = false; } conn->timeout = GU_TIME_ETERNITY; return ret; } static long _check_recv_queue_growth (gcs_conn_t* conn, ssize_t size) { assert (GCS_CONN_JOINER == conn->state); long ret = 0; long long pause = gcs_fc_process (&conn->stfc, size); if (pause > 0) { /* replication needs throttling */ ret = gu_mutex_lock(&conn->fc_lock); if (!ret) { ret = gcs_fc_stop_end(conn); } else { gu_fatal("failed to lock FC mutex"); abort(); } if (gu_likely(pause != GU_TIME_ETERNITY)) { if (GU_TIME_ETERNITY == conn->timeout) { conn->timeout = gu_time_calendar(); } conn->timeout += pause; // we need to track pauses regardless } else if (conn->timeout != GU_TIME_ETERNITY) { conn->timeout = GU_TIME_ETERNITY; gu_warn ("Replication paused until state transfer is complete " "due to reaching hard limit on the writeset queue size."); } return ret; } else { return pause; // 0 or error code } } static long _close(gcs_conn_t* conn, bool join_recv_thread) { /* all possible races in connection closing should be resolved by * the following call, it is thread-safe */ long ret; if (gu_atomic_fetch_and_add(&conn->inner_close_count, 1) != 0) { return -EALREADY; } if (!(ret = gcs_sm_close (conn->sm))) { // we ignore return value on purpose. the reason is // we can not tell why self-leave message is generated. // there are two possible reasons. // 1. gcs_core_close is called. // 2. GCommConn::run() caught exception. 
(void)gcs_core_close (conn->core); if (join_recv_thread) { /* if called from gcs_close(), we need to synchronize with gcs_recv_thread at this point */ if ((ret = gu_thread_join (conn->recv_thread, NULL))) { gu_error ("Failed to join recv_thread(): %ld (%s)", -ret, strerror(-ret)); } else { gu_info ("recv_thread() joined."); } /* recv_thread() is supposed to set state to CLOSED when exiting */ assert (GCS_CONN_CLOSED == conn->state); } gu_info ("Closing send queue."); struct gcs_repl_act** act_ptr; /* At this point (state == CLOSED) no new threads should be able to * queue for repl (check gcs_repl()), and recv thread is joined, so no * new actions will be received. Abort threads that are still waiting * in repl queue */ while ((act_ptr = (struct gcs_repl_act**)gcs_fifo_lite_get_head (conn->repl_q))) { struct gcs_repl_act* act = *act_ptr; gcs_fifo_lite_pop_head (conn->repl_q); /* This will wake up repl threads in repl_q - * they'll quit on their own, * they don't depend on the conn object after waking */ gu_mutex_lock (&act->wait_mutex); gu_cond_signal (&act->wait_cond); gu_mutex_unlock (&act->wait_mutex); } gcs_fifo_lite_close (conn->repl_q); /* wake all gcs_recv() threads () */ // FIXME: this can block waiting for applicaiton threads to fetch all // items. In certain situations this can block forever. Ticket #113 gu_info ("Closing receive queue."); gu_fifo_close (conn->recv_q); } return ret; } /* * gcs_recv_thread() receives whatever actions arrive from group, * and performs necessary actions based on action type. 
*/ static void *gcs_recv_thread (void *arg) { gcs_conn_t* conn = (gcs_conn_t*)arg; ssize_t ret = -ECONNABORTED; // To avoid race between gcs_open() and the following state check in while() gu_cond_t tmp_cond; /* TODO: rework when concurrency in SM is allowed */ gu_cond_init (&tmp_cond, NULL); gcs_sm_enter(conn->sm, &tmp_cond, false, true); gcs_sm_leave(conn->sm); gu_cond_destroy (&tmp_cond); while (conn->state < GCS_CONN_CLOSED) { gcs_seqno_t this_act_id = GCS_SEQNO_ILL; struct gcs_repl_act** repl_act_ptr; struct gcs_act_rcvd rcvd; ret = gcs_core_recv (conn->core, &rcvd, conn->timeout); if (gu_unlikely(ret <= 0)) { gu_debug("gcs_core_recv returned %zd: %s", ret, gcs_error_str(-ret)); if (-ETIMEDOUT == ret && _handle_timeout(conn)) continue; assert (NULL == rcvd.act.buf); assert (0 == rcvd.act.buf_len); assert (GCS_ACT_ERROR == rcvd.act.type || GCS_ACT_INCONSISTENCY == rcvd.act.type); assert (GCS_SEQNO_ILL == rcvd.id); if (GCS_ACT_INCONSISTENCY == rcvd.act.type) { /* In the case of inconsistency our concern is to report it to * replicator ASAP. Current contents of the slave queue are * meaningless. 
*/ gu_fifo_clear(conn->recv_q); } struct gcs_recv_act* err_act = (struct gcs_recv_act*) gu_fifo_get_tail(conn->recv_q); err_act->rcvd = rcvd; err_act->local_id = GCS_SEQNO_ILL; GCS_FIFO_PUSH_TAIL (conn, rcvd.act.buf_len); break; } // gu_info ("Received action type: %d, size: %d, global seqno: %lld", // act_type, act_size, (long long)act_id); assert (rcvd.act.type < GCS_ACT_ERROR); assert (ret == rcvd.act.buf_len); if (gu_unlikely(rcvd.act.type >= GCS_ACT_STATE_REQ || (conn->vote_wait_ && GCS_ACT_COMMIT_CUT==rcvd.act.type))) { ret = gcs_handle_actions (conn, rcvd); if (gu_unlikely(ret <= 0 && GCS_ACT_COMMIT_CUT == rcvd.act.type)) { /* Commit cut will be discarded, the buffer needs to be * freed */ ::free(const_cast(rcvd.act.buf)); } if (gu_unlikely(ret < 0)) { // error gu_debug ("gcs_handle_actions returned %zd: %s", ret, strerror(-ret)); break; } if (gu_likely(ret <= 0)) continue; // not for application } /* deliver to application (note matching assert in the bottom-half of * gcs_repl()) */ if (gu_likely (rcvd.act.type != GCS_ACT_WRITESET || (rcvd.id > 0 && (conn->global_seqno = rcvd.id)))) { /* successful delivery - increment local order */ this_act_id = gu_atomic_fetch_and_add(&conn->local_act_id, 1); } if (NULL != rcvd.local && (repl_act_ptr = (struct gcs_repl_act**) gcs_fifo_lite_get_head (conn->repl_q)) && (gu_likely ((*repl_act_ptr)->act_in == rcvd.local) || /* at this point repl_q is locked and we need to unlock it and * return false to fall to the 'else' branch; unlikely case */ (gcs_fifo_lite_release (conn->repl_q), false))) { /* local action from repl_q */ struct gcs_repl_act* repl_act = *repl_act_ptr; gcs_fifo_lite_pop_head (conn->repl_q); assert (repl_act->action->type == rcvd.act.type); assert (repl_act->action->size == rcvd.act.buf_len || repl_act->action->type == GCS_ACT_STATE_REQ); repl_act->action->buf = rcvd.act.buf; repl_act->action->seqno_g = rcvd.id; repl_act->action->seqno_l = this_act_id; gu_mutex_lock (&repl_act->wait_mutex); gu_cond_signal 
(&repl_act->wait_cond); gu_mutex_unlock (&repl_act->wait_mutex); } else if (this_act_id >= 0 || /* action that was SENT and there is no sender waiting for it */ (rcvd.id == -EAGAIN && rcvd.act.type == GCS_ACT_WRITESET)) { /* remote/non-repl'ed action */ assert(rcvd.local != NULL || rcvd.id != -EAGAIN); /* Note that the resource pointed to by rcvd.local belongs to * the original action sender, so we don't care about freeing it */ struct gcs_recv_act* recv_act = (struct gcs_recv_act*)gu_fifo_get_tail (conn->recv_q); if (gu_likely (NULL != recv_act)) { recv_act->rcvd = rcvd; recv_act->local_id = this_act_id; conn->queue_len = gu_fifo_length (conn->recv_q) + 1; /* attempt to send stops only for foreign actions */ bool const send_stop (rcvd.local == NULL && gcs_fc_stop_begin(conn)); // release queue GCS_FIFO_PUSH_TAIL (conn, rcvd.act.buf_len); if (gu_unlikely(GCS_CONN_JOINER == conn->state && !send_stop)) { ret = _check_recv_queue_growth (conn, rcvd.act.buf_len); assert (ret <= 0); if (ret < 0) break; } if (gu_unlikely(send_stop) && (ret = gcs_fc_stop_end(conn))) { gu_error ("gcs_fc_stop() returned %zd: %s", ret, gcs_error_str(-ret)); break; } } else { assert (GCS_CONN_CLOSED == conn->state); ret = -EBADFD; break; } // gu_info("Received foreign action of type %d, size %d, id=%llu, " // "action %p", rcvd.act.type, rcvd.act.buf_len, // this_act_id, rcvd.act.buf); } else if (rcvd.id == -EAGAIN) { assert(rcvd.local != NULL); /* local action */ gu_fatal("Action {%p, %zd, %s} needs resending: " "sender idx %d, my idx %d, local %p", rcvd.act.buf, rcvd.act.buf_len, gcs_act_type_to_str(rcvd.act.type), rcvd.sender_idx, conn->my_idx, rcvd.local); assert (0); ret = -ENOTRECOVERABLE; break; } else if (conn->my_idx == rcvd.sender_idx) { gu_debug("Discarding: unordered local action not in repl_q: " "{ {%p, %zd, %s}, %d, %" PRId64 " }.", rcvd.act.buf, rcvd.act.buf_len, gcs_act_type_to_str(rcvd.act.type), rcvd.sender_idx, rcvd.id); } else { gu_fatal ("Protocol violation: unordered 
remote action: " "{ {%p, %zd, %s}, %d, % " PRId64 " }", rcvd.act.buf, rcvd.act.buf_len, gcs_act_type_to_str(rcvd.act.type), rcvd.sender_idx, rcvd.id); assert (0); ret = -ENOTRECOVERABLE; break; } } if (ret > 0) { ret = 0; } else if (ret < 0) { /* In case of error call _close() to release repl_q waiters. */ (void)_close(conn, false); gcs_shift_state (conn, GCS_CONN_CLOSED); } gu_info ("RECV thread exiting %zd: %s", ret, strerror(-ret)); return NULL; } /* Opens connection to group */ long gcs_open (gcs_conn_t* conn, const char* channel, const char* url, bool const bootstrap) { long ret = 0; if ((ret = gcs_sm_open(conn->sm))) return ret; // open in case it is closed gu_cond_t tmp_cond; /* TODO: rework when concurrency in SM is allowed */ gu_cond_init (&tmp_cond, NULL); if ((ret = gcs_sm_enter (conn->sm, &tmp_cond, false, true))) { gu_error("Failed to enter send monitor: %ld (%s)", ret, strerror(-ret)); return ret; } if (GCS_CONN_CLOSED == conn->state) { if (!(ret = gcs_core_open (conn->core, channel, url, bootstrap))) { _reset_pkt_size(conn); if (!(ret = gu_thread_create (&conn->recv_thread, NULL, gcs_recv_thread, conn))) { gcs_fifo_lite_open(conn->repl_q); gu_fifo_open(conn->recv_q); gcs_shift_state (conn, GCS_CONN_OPEN); gu_info ("Opened channel '%s'", channel); conn->inner_close_count = 0; conn->outer_close_count = 0; goto out; } else { gu_error ("Failed to create main receive thread: %ld (%s)", ret, strerror(-ret)); } gcs_core_close (conn->core); } else { gu_error ("Failed to open channel '%s' at '%s': %ld (%s)", channel, url, ret, strerror(-ret)); } } else { gu_error ("Bad GCS connection state: %d (%s)", conn->state, gcs_conn_state_str[conn->state]); ret = -EBADFD; } out: gcs_sm_leave (conn->sm); gu_cond_destroy (&tmp_cond); return ret; } /* Closes group connection */ /* After it returns, application should have all time in the world to cancel * and join threads which try to access the handle, before calling gcs_destroy() * on it. 
 */
/* Closes the group connection. Idempotent: a second concurrent/repeated
 * call is rejected with -EALREADY via the outer_close_count counter.
 * Joins the receive thread (directly, or after _close() reports that the
 * receive thread is already closing) and frees the progress tracker. */
long gcs_close (gcs_conn_t *conn)
{
    long ret;

    if (gu_atomic_fetch_and_add(&conn->outer_close_count, 1) != 0) {
        return -EALREADY;
    }

    if ((ret = _close(conn, true)) == -EALREADY) {
        gu_info("recv_thread() already closing, joining thread.");
        /* _close() has already been called by gcs_recv_thread() and it is
           taking care of cleanup, just join the thread */
        if ((ret = gu_thread_join (conn->recv_thread, NULL))) {
            gu_error ("Failed to join recv_thread(): %ld (%s)",
                      -ret, strerror(-ret));
        }
        else {
            gu_info ("recv_thread() joined.");
        }
    }

    /* recv_thread() is supposed to set state to CLOSED when exiting */
    assert (GCS_CONN_CLOSED == conn->state);

    if (conn->progress_)
    {
        delete conn->progress_;
        conn->progress_ = nullptr;
    }

    return ret;
}

/* Destructor: expects the connection to already be CLOSED (aborts
 * otherwise), then tears down the queues, send monitor, core and the
 * flow-control mutex. */
gcs_conn::~gcs_conn()
{
    auto conn(this); // to minimize diff

    int err;
    gu_cond_t tmp_cond;
    gu_cond_init (&tmp_cond, NULL);

    if (!(err = gcs_sm_enter (conn->sm, &tmp_cond, false, true)))
        // need an error here
    {
        if (GCS_CONN_CLOSED != conn->state)
        {
            if (GCS_CONN_CLOSED > conn->state)
                gu_error ("Attempt to call gcs_destroy() before gcs_close(): "
                          "state = %d", conn->state);
            gu_cond_destroy (&tmp_cond);
            gu_abort();
        }

        gcs_sm_leave (conn->sm);

        gcs_shift_state (conn, GCS_CONN_DESTROYED);
        /* we must unlock the mutex here to allow unfortunate threads
         * to acquire the lock and give up gracefully */
    }
    else
    {
        gu_debug("gcs_destroy: gcs_sm_enter() err = %d", err);
        // We should still cleanup resources
    }

    gu_fifo_destroy (conn->recv_q);
    gu_cond_destroy (&tmp_cond);
    gcs_sm_destroy (conn->sm);

    if ((err = gcs_fifo_lite_destroy (conn->repl_q))) {
        gu_error ("Error destroying repl FIFO: %d (%s)", err, strerror(-err));
        gu_abort();
    }

    if ((err = gcs_core_destroy (conn->core))) {
        gu_error ("Error destroying core: %d (%s)", err, strerror(-err));
        gu_abort();
    }

    /* This must not last for long */
    while (gu_mutex_destroy (&conn->fc_lock));
}

/* Frees resources associated with GCS connection handle */
long gcs_destroy (gcs_conn_t *conn)
{
    /* Destruction is delegated to the destructor above; any exception is
     * converted to a generic -1 error. */
    try
    {
        delete conn;
        return 0;
    }
    catch (...)
{ return -1; } } /* Puts action in the send queue and returns */ long gcs_sendv (gcs_conn_t* const conn, const struct gu_buf* const act_bufs, size_t const act_size, gcs_act_type_t const act_type, bool const scheduled, bool const grab) { assert (!(scheduled && grab)); if (gu_unlikely(act_size > GCS_MAX_ACT_SIZE)) return -EMSGSIZE; long ret = -ENOTCONN; if (gu_unlikely(grab)) { if (!(ret = gcs_sm_grab (conn->sm))) { while ((GCS_CONN_OPEN >= conn->state) && (ret = gcs_core_send (conn->core, act_bufs, act_size, act_type)) == -ERESTART); gcs_sm_release (conn->sm); } } else { /*! locking connection here to avoid race with gcs_close() * @note: gcs_repl() and gcs_recv() cannot lock connection * because they block indefinitely waiting for actions */ gu_cond_t tmp_cond; gu_cond_init (&tmp_cond, NULL); if (!(ret = gcs_sm_enter (conn->sm, &tmp_cond, scheduled, true))) { while ((GCS_CONN_OPEN >= conn->state) && (ret = gcs_core_send (conn->core, act_bufs, act_size, act_type)) == -ERESTART); gcs_sm_leave (conn->sm); gu_cond_destroy (&tmp_cond); } } return ret; } long gcs_schedule (gcs_conn_t* conn) { return gcs_sm_schedule (conn->sm); } long gcs_interrupt (gcs_conn_t* conn, long handle) { return gcs_sm_interrupt (conn->sm, handle); } long gcs_caused(gcs_conn_t* conn, gu::GTID& gtid) { return gcs_core_caused(conn->core, gtid); } static inline bool fc_active(gcs_conn_t* conn) { return conn->stop_count > 0; } /* Puts action in the send queue and returns after it is replicated */ long gcs_replv (gcs_conn_t* const conn, //!size > GCS_MAX_ACT_SIZE)) return -EMSGSIZE; long ret; assert (act); assert (act->size > 0); act->seqno_l = GCS_SEQNO_ILL; act->seqno_g = GCS_SEQNO_ILL; /* This is good - we don't have to do a copy because we wait */ struct gcs_repl_act repl_act(act_in, act); gu_mutex_init (&repl_act.wait_mutex, NULL); gu_cond_init (&repl_act.wait_cond, NULL); /* Send action and wait for signal from recv_thread * we need to lock a mutex before we can go wait for signal */ if (!(ret = 
gu_mutex_lock (&repl_act.wait_mutex))) { // Lock here does the following: // 1. serializes gcs_core_send() access between gcs_repl() and // gcs_send() // 2. avoids race with gcs_close() and gcs_destroy() if (!(ret = gcs_sm_enter (conn->sm, &repl_act.wait_cond, scheduled, true))) { struct gcs_repl_act** act_ptr; const void* const orig_buf = act->buf; // some hack here to achieve one if() instead of two: // ret = -EAGAIN part is a workaround for #569 // if (conn->state >= GCS_CONN_CLOSE) or (act_ptr == NULL) // ret will be -ENOTCONN if ((ret = -EAGAIN, !fc_active(conn) || act->type != GCS_ACT_WRITESET) && (ret = -ENOTCONN, GCS_CONN_OPEN >= conn->state) && (act_ptr = (struct gcs_repl_act**)gcs_fifo_lite_get_tail (conn->repl_q))) { *act_ptr = &repl_act; gcs_fifo_lite_push_tail (conn->repl_q); // Keep on trying until something else comes out while ((ret = gcs_core_send (conn->core, act_in, act->size, act->type)) == -ERESTART) {} if (ret < 0) { /* remove item from the queue, it will never be delivered */ gu_debug( "Send action {%p, %" PRId32 ", %s} returned %ld (%s)", act->buf, act->size, gcs_act_type_to_str(act->type), ret, gcs_error_str(-ret)); if (!gcs_fifo_lite_remove (conn->repl_q)) { gu_fatal ("Failed to remove unsent item from repl_q"); assert(0); ret = -ENOTRECOVERABLE; } } else { assert (ret == (ssize_t)act->size); } } gcs_sm_leave (conn->sm); assert(ret); /* now we can go waiting for action delivery */ if (ret >= 0) { /* Sequential consistency is now guaranteed by the * backend. 
*/ if (seq_cb && seq_cb->fn) { seq_cb->fn(seq_cb->ctx); } gu_cond_wait (&repl_act.wait_cond, &repl_act.wait_mutex); #ifndef GCS_FOR_GARB /* assert (act->buf != 0); */ if (act->buf == 0) { /* Recv thread purged repl_q before action was delivered */ ret = -ENOTCONN; goto out; } #else assert (act->buf == 0); #endif /* GCS_FOR_GARB */ if (act->seqno_g < 0) { assert (GCS_SEQNO_ILL == act->seqno_l || GCS_ACT_WRITESET != act->type); if (act->seqno_g == GCS_SEQNO_ILL) { /* action was not replicated for some reason */ assert (orig_buf == act->buf); ret = -EINTR; } else { /* core provided an error code in global seqno */ assert (orig_buf != act->buf); ret = act->seqno_g; act->seqno_g = GCS_SEQNO_ILL; } if (orig_buf != act->buf) // action was allocated in gcache { gu_debug("Freeing gcache buffer %p after receiving %ld", act->buf, ret); gcs_gcache_free (conn->gcache, act->buf); act->buf = orig_buf; } } } } #ifndef GCS_FOR_GARB out: #endif /* GCS_FOR_GARB */ gu_mutex_unlock (&repl_act.wait_mutex); } gu_mutex_destroy (&repl_act.wait_mutex); gu_cond_destroy (&repl_act.wait_cond); #ifdef GCS_DEBUG_GCS // gu_debug ("\nact_size = %u\nact_type = %u\n" // "act_id = %llu\naction = %p (%s)\n", // act->size, act->type, act->seqno_g, act->buf, act->buf); #endif return ret; } long gcs_request_state_transfer (gcs_conn_t* conn, int version, const void* req, size_t size, const char* donor, const gu::GTID& ist_gtid, gcs_seqno_t& order) { long ret = -ENOMEM; size_t donor_len = strlen(donor) + 1; // include terminating \0 size_t rst_size = size + donor_len + ist_gtid.serial_size() + 2; // for simplicity, allocate maximum space what we need here. char* rst = (char*)gu_malloc (rst_size); order = GCS_SEQNO_ILL; if (rst) { log_debug << "ist_gtid " << ist_gtid; int offset = 0; // version 0,1 /* RST format: |donor name|\0|app request| * anything more complex will require a special (de)serializer. * NOTE: this is sender part. Check gcs_group_handle_state_request() * for the receiver part. 
*/ if (version < 2) { #ifndef GCS_FOR_GARB assert(0); // this branch is for SST request by garbd only #endif /* GCS_FOR_GARB */ memcpy (rst + offset, donor, donor_len); offset += donor_len; memcpy (rst + offset, req, size); rst_size = size + donor_len; } // version 2(expose joiner's seqno and smart donor selection) // RST format: |donor_name|\0|'V'|version|ist_uuid|ist_seqno|app_request| // we expect 'version' could be hold by 'char' // since app_request v0 starts with sst method name // and app_request v1 starts with 'STRv1' // and ist_uuid starts with hex character in lower case. // it's safe to use 'V' as separator. else { memcpy (rst + offset, donor, donor_len); offset += donor_len; rst[offset++] = 'V'; rst[offset++] = (char)version; offset = ist_gtid.serialize(rst, rst_size, offset); memcpy (rst + offset, req, size); assert(offset + size == rst_size); log_debug << "SST sending: " << (char*)req << ", " << rst_size; } struct gcs_action action; action.buf = rst; action.size = (ssize_t)rst_size; action.type = GCS_ACT_STATE_REQ; ret = gcs_repl(conn, &action, false); gu_free (rst); order = action.seqno_l; if (ret > 0) { assert (action.buf != rst); #ifndef GCS_FOR_GARB assert (action.buf != NULL); gcs_gcache_free (conn->gcache, action.buf); #else assert (action.buf == NULL); #endif assert (ret == (ssize_t)rst_size); assert (action.seqno_g >= 0); assert (action.seqno_l > 0); // on joiner global seqno stores donor index // on donor global seqno stores global seqno ret = action.seqno_g; } else { assert (/*action.buf == NULL ||*/ action.buf == rst); } } return ret; } long gcs_desync (gcs_conn_t* conn, gcs_seqno_t& order) { gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; // for desync operation we use the lowest str_version. 
long ret = gcs_request_state_transfer (conn, 2, "", 1, GCS_DESYNC_REQ, gu::GTID(ist_uuid, ist_seqno), order); if (ret >= 0) { return 0; } else { return ret; } } static inline void GCS_FIFO_POP_HEAD (gcs_conn_t* conn, ssize_t size) { if (conn->progress_) conn->progress_->update(1); assert (conn->recv_q_size >= size); conn->recv_q_size -= size; gu_fifo_pop_head (conn->recv_q); } /* Returns when an action from another process is received */ long gcs_recv (gcs_conn_t* conn, struct gcs_action* action) { int err; struct gcs_recv_act* recv_act = NULL; assert (action); if ((recv_act = (struct gcs_recv_act*)gu_fifo_get_head (conn->recv_q, &err))) { conn->queue_len = gu_fifo_length (conn->recv_q) - 1; bool send_cont = gcs_fc_cont_begin (conn); bool send_sync = gcs_send_sync_begin (conn); action->buf = (void*)recv_act->rcvd.act.buf; action->size = recv_act->rcvd.act.buf_len; action->type = recv_act->rcvd.act.type; action->seqno_g = recv_act->rcvd.id; action->seqno_l = recv_act->local_id; if (gu_unlikely (GCS_ACT_CCHANGE == action->type)) { err = gu_fifo_cancel_gets (conn->recv_q); if (err) { gu_fatal ("Internal logic error: failed to cancel recv_q " "\"gets\": %d (%s). Aborting.", err, strerror(-err)); gu_abort(); } } GCS_FIFO_POP_HEAD (conn, action->size); // release the queue if (gu_unlikely(send_cont) && (err = gcs_fc_cont_end(conn))) { // We have successfully received an action, but failed to send // important control message. What do we do? Inability to send CONT // can block the whole cluster. There are only conn->queue_len - 1 // attempts to do that (that's how many times we'll get here). // Perhaps if the last attempt fails, we should crash. if (conn->queue_len > 0) { gu_warn ("Failed to send CONT message: %d (%s). " "Attempts left: %ld", err, gcs_error_str(-err), conn->queue_len); } else { gu_fatal ("Last opportunity to send CONT message failed: " "%d (%s). 
Aborting to avoid cluster lock-up...", err, gcs_error_str(-err)); gcs_close(conn); gu_abort(); } } else if (gu_unlikely(send_sync) && (err = gcs_send_sync_end (conn))) { gu_warn ("Failed to send SYNC message: %d (%s). Will try later.", err, gcs_error_str(-err)); } return action->size; } else { action->buf = NULL; action->size = 0; action->type = GCS_ACT_ERROR; action->seqno_g = GCS_SEQNO_ILL; action->seqno_l = GCS_SEQNO_ILL; switch (err) { case -ENODATA: assert (GCS_CONN_CLOSED == conn->state); return GCS_CLOSED_ERROR; default: return err; } } } long gcs_resume_recv (gcs_conn_t* conn) { int ret = GCS_CLOSED_ERROR; ret = gu_fifo_resume_gets (conn->recv_q); if (ret) { if (conn->state < GCS_CONN_CLOSED) { gu_fatal ("Internal logic error: failed to resume \"gets\" on " "recv_q: %d (%s). Aborting.", ret, strerror (-ret)); assert(0); gcs_close (conn); gu_abort(); } else { ret = GCS_CLOSED_ERROR; } } return ret; } long gcs_wait (gcs_conn_t* conn) { if (gu_likely(GCS_CONN_SYNCED == conn->state)) { return (conn->stop_count > 0 || (conn->queue_len > conn->upper_limit)); } else { switch (conn->state) { case GCS_CONN_OPEN: return -ENOTCONN; case GCS_CONN_CLOSED: case GCS_CONN_DESTROYED: return GCS_CLOSED_ERROR; default: return -EAGAIN; // wait until get sync } } } long gcs_conf_set_pkt_size (gcs_conn_t *conn, long pkt_size) { if (conn->params.max_packet_size == pkt_size) return pkt_size; return gcs_set_pkt_size (conn, pkt_size); } long gcs_set_last_applied (gcs_conn_t* conn, const gu::GTID& gtid) { assert(gtid.uuid() != GU_UUID_NIL); assert(gtid.seqno() >= 0); gu_cond_t cond; gu_cond_init (&cond, NULL); long ret = gcs_sm_enter (conn->sm, &cond, false, false); if (ret) { log_info << "Unable to report last applied write-set to " << "cluster. Will try later. 
" << "(gcs_sm_enter(): " << -ret << " seqno: " << gtid.seqno() << ")"; } else { log_debug << "Sending last applied seqno: " << gtid.seqno(); ret = gcs_core_set_last_applied(conn->core, gtid); gcs_sm_leave(conn->sm); if (ret < 0) { log_info << "Unable to report last applied write-set to " << "cluster. Will try later. " << "(gcs_core_set_last_applied(): " << -ret << " seqno: " << gtid.seqno() << ")"; } } gu_cond_destroy (&cond); return ret; } int gcs_proto_ver(gcs_conn_t* conn) { return gcs_core_proto_ver(conn->core); } int gcs_vote (gcs_conn_t* const conn, const gu::GTID& gtid, uint64_t const code, const void* const msg, size_t const msg_len) { if (gcs_proto_ver(conn) < 1) { assert(code != 0); // should be here only our own initiative log_info << "Not all group members support inconsistency voting. " << "Reverting to old behavior: abort on error."; return 1; /* no voting with old protocol */ } if (conn->state >= GCS_CONN_JOINER) { assert(code != 0); // should be here only our own initiative log_info << "Can't vote when not at least JOINED. " << "Assuming inconsistency. Full SST is required"; return 1; /* Error applying IST event */ } int const err(gu_mutex_lock(&conn->vote_lock_)); if (gu_unlikely(0 != err)) { assert(0); return -err; } while (conn->vote_wait_) /* only one at a time */ { gu_mutex_unlock(&conn->vote_lock_); usleep(10000); gu_mutex_lock(&conn->vote_lock_); } if (gtid.uuid() == conn->vote_gtid_.uuid() && /* seqno was voted already */ gtid.seqno() <= conn->vote_gtid_.seqno()) /* - ensure monotonicity */ { assert(0 == code); /* we can be here only by voting request */ gu_mutex_unlock(&conn->vote_lock_); return -EALREADY; } gu::GTID const old_gtid(conn->vote_gtid_); conn->vote_gtid_ = gtid; conn->vote_err_ = 0; /* We can reach this point for two reasons: * 1. either we want to report an error * 2. 
or we are voting by request (in which case code == 0) */ int64_t my_vote; if (0 != code) { size_t const buf_len(gtid.serial_size() + sizeof(code)); std::vector buf(buf_len); size_t offset(0); offset = gtid.serialize(buf.data(), buf.size(), offset); offset = gu::serialize8(code, buf.data(), buf.size(), offset); assert(buf.size() == offset); gu::MMH3 hash; hash.append(buf.data(), buf.size()); hash.append(msg, msg_len); my_vote = (hash.gather8() | (1ULL << 63)); // make sure it is never 0 (and always negative) in case of error } else { my_vote = 0; } int ret(gcs_core_send_vote(conn->core, gtid, my_vote, msg, msg_len)); if (ret < 0) { assert(ret != -EAGAIN); /* EAGAIN should be taken care of at core level*/ conn->vote_gtid_ = old_gtid; /* failed to send vote */ goto cleanup; } /* wait for voting results */ conn->vote_wait_ = true; gu_cond_wait(&conn->vote_cond_, &conn->vote_lock_); ret = conn->vote_err_; assert(ret <= 0); if (0 == ret) { ret = my_vote != conn->vote_res_; // 0 for agreement, 1 for disagreement } conn->vote_wait_ = false; cleanup: log_debug << "Error voting thread wating on " << gtid.seqno() << ',' << my_vote << ", got " << conn->vote_res_ << ", returning " << ret; conn->vote_res_ = 0; gu_mutex_unlock(&conn->vote_lock_); return ret; } long gcs_join (gcs_conn_t* conn, const gu::GTID& gtid, int const code) { /* * Always allow sending of join messages when not in JOINER state. * This is required for correct handling of desync counter, * especially in DONOR state: * If the DONOR does desync in combination with SST donation, the * gcs_join() calls from resync() and sst_sent() might * come with out of order seqnos, leaving the desync_count in gcs_group * permanently in non-zero value. In this case the node will not become * synced again unless it is temporarily removed from the group. 
*/ if (conn->state != GCS_CONN_JOINER || code < 0 || gtid.seqno() >= conn->join_gtid.seqno()) { conn->join_gtid = gtid; conn->join_code = code; conn->need_to_join = true; return s_join (conn); } assert(0); return 0; } gcs_seqno_t gcs_local_sequence(gcs_conn_t* conn) { return gu_atomic_fetch_and_add(&conn->local_act_id, 1); } void gcs_get_stats (gcs_conn_t* conn, struct gcs_stats* stats) { gu_fifo_stats_get (conn->recv_q, &stats->recv_q_len, &stats->recv_q_len_max, &stats->recv_q_len_min, &stats->recv_q_len_avg); stats->recv_q_size = conn->recv_q_size; gcs_sm_stats_get (conn->sm, &stats->send_q_len, &stats->send_q_len_max, &stats->send_q_len_min, &stats->send_q_len_avg, &stats->fc_paused_ns, &stats->fc_paused_avg); stats->fc_ssent = conn->stats_fc_stop_sent; stats->fc_csent = conn->stats_fc_cont_sent; stats->fc_received = conn->stats_fc_received; stats->fc_active = fc_active(conn); stats->fc_requested= conn->stop_sent_ > 0; gcs_core_get_protocols(conn->core, stats->proto_appl, stats->proto_repl, stats->proto_gcs); } void gcs_flush_stats(gcs_conn_t* conn) { gu_fifo_stats_flush(conn->recv_q); gcs_sm_stats_flush (conn->sm); conn->stats_fc_stop_sent = 0; conn->stats_fc_cont_sent = 0; conn->stats_fc_received = 0; } void gcs_get_status(gcs_conn_t* conn, gu::Status& status) { if (conn->state < GCS_CONN_CLOSED) { gcs_core_get_status(conn->core, status); } } static long _set_fc_limit (gcs_conn_t* conn, const char* value) { long long limit; const char* const endptr = gu_str2ll(value, &limit); if (limit > 0LL && *endptr == '\0') { if (limit > LONG_MAX) limit = LONG_MAX; gu_fifo_lock(conn->recv_q); { if (!gu_mutex_lock (&conn->fc_lock)) { conn->params.fc_base_limit = limit; _set_fc_limits (conn); gu_config_set_int64 (conn->config, GCS_PARAMS_FC_LIMIT, conn->params.fc_base_limit); gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); abort(); } } gu_fifo_release (conn->recv_q); return 0; } else { return -EINVAL; } } static long _set_fc_factor 
(gcs_conn_t* conn, const char* value) { double factor; const char* const endptr = gu_str2dbl(value, &factor); if (factor >= 0.0 && factor <= 1.0 && *endptr == '\0') { gu_fifo_lock(conn->recv_q); if (!gu_mutex_lock (&conn->fc_lock)) { if (factor != conn->params.fc_resume_factor) { conn->params.fc_resume_factor = factor; _set_fc_limits (conn); gu_config_set_double (conn->config, GCS_PARAMS_FC_FACTOR, conn->params.fc_resume_factor); } gu_mutex_unlock (&conn->fc_lock); } else { gu_fatal ("Failed to lock mutex."); gu_abort(); } gu_fifo_release (conn->recv_q); return 0; } else { return -EINVAL; } } static long _set_fc_debug (gcs_conn_t* conn, const char* value) { bool debug; const char* const endptr = gu_str2bool(value, &debug); if (*endptr == '\0') { if (conn->params.fc_debug == debug) return 0; conn->params.fc_debug = debug; gcs_fc_debug (&conn->stfc, debug); gu_config_set_bool (conn->config, GCS_PARAMS_FC_DEBUG, debug); return 0; } else { return -EINVAL; } } static long _set_sync_donor (gcs_conn_t* conn, const char* value) { bool sd; const char* const endptr = gu_str2bool (value, &sd); if (endptr[0] != '\0') return -EINVAL; if (conn->params.sync_donor != sd) { conn->params.sync_donor = sd; conn->max_fc_state = sd ? 
GCS_CONN_DONOR : GCS_CONN_JOINED; } return 0; } static long _set_pkt_size (gcs_conn_t* conn, const char* value) { long long pkt_size; const char* const endptr = gu_str2ll (value, &pkt_size); if (pkt_size > 0 && *endptr == '\0') { if (pkt_size > LONG_MAX) pkt_size = LONG_MAX; if (conn->params.max_packet_size == pkt_size) return 0; long ret = gcs_set_pkt_size (conn, pkt_size); if (ret >= 0) { ret = 0; gu_config_set_int64(conn->config,GCS_PARAMS_MAX_PKT_SIZE,pkt_size); } return ret; } else { // gu_warn ("Invalid value for %s: '%s'", GCS_PARAMS_PKT_SIZE, value); return -EINVAL; } } static long _set_recv_q_hard_limit (gcs_conn_t* conn, const char* value) { long long limit; const char* const endptr = gu_str2ll (value, &limit); if (limit > 0 && *endptr == '\0') { if (limit > LONG_MAX) limit = LONG_MAX; long long limit_fixed = limit * gcs_fc_hard_limit_fix; if (conn->params.recv_q_hard_limit == limit_fixed) return 0; gu_config_set_int64 (conn->config, GCS_PARAMS_RECV_Q_HARD_LIMIT, limit); conn->params.recv_q_hard_limit = limit_fixed; return 0; } else { return -EINVAL; } } static long _set_recv_q_soft_limit (gcs_conn_t* conn, const char* value) { double dbl; const char* const endptr = gu_str2dbl (value, &dbl); if (dbl >= 0.0 && dbl < 1.0 && *endptr == '\0') { if (dbl == conn->params.recv_q_soft_limit) return 0; gu_config_set_double (conn->config, GCS_PARAMS_RECV_Q_SOFT_LIMIT, dbl); conn->params.recv_q_soft_limit = dbl; return 0; } else { return -EINVAL; } } static long _set_max_throttle (gcs_conn_t* conn, const char* value) { double dbl; const char* const endptr = gu_str2dbl (value, &dbl); if (dbl >= 0.0 && dbl < 1.0 && *endptr == '\0') { if (dbl == conn->params.max_throttle) return 0; gu_config_set_double (conn->config, GCS_PARAMS_MAX_THROTTLE, dbl); conn->params.max_throttle = dbl; return 0; } else { return -EINVAL; } } void gcs_register_params (gu::Config& conf) { gcs_params::register_params(conf); gcs_core_register(conf); } long gcs_param_set (gcs_conn_t* conn, const 
char* key, const char *value) { if (!strcmp (key, GCS_PARAMS_FC_LIMIT)) { return _set_fc_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_FC_FACTOR)) { return _set_fc_factor (conn, value); } else if (!strcmp (key, GCS_PARAMS_FC_DEBUG)) { return _set_fc_debug (conn, value); } else if (!strcmp (key, GCS_PARAMS_SYNC_DONOR)) { return _set_sync_donor (conn, value); } else if (!strcmp (key, GCS_PARAMS_MAX_PKT_SIZE)) { return _set_pkt_size (conn, value); } else if (!strcmp (key, GCS_PARAMS_RECV_Q_HARD_LIMIT)) { return _set_recv_q_hard_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_RECV_Q_SOFT_LIMIT)) { return _set_recv_q_soft_limit (conn, value); } else if (!strcmp (key, GCS_PARAMS_MAX_THROTTLE)) { return _set_max_throttle (conn, value); } #ifdef GCS_SM_DEBUG else if (!strcmp (key, GCS_PARAMS_SM_DUMP)) { gcs_sm_dump_state(conn->sm, stderr); return 0; } #endif /* GCS_SM_DEBUG */ else { return gcs_core_param_set (conn->core, key, value); } } const char* gcs_param_get (gcs_conn_t* conn, const char* key) { gu_warn ("Not implemented: %s", __FUNCTION__); return NULL; } galera-4-26.4.25/gcs/src/gcs_gcomm.hpp000644 000164 177776 00000000427 15107057155 020531 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ #ifndef _gcs_gcomm_h_ #define _gcs_gcomm_h_ #include "gcs_backend.hpp" extern GCS_BACKEND_REGISTER_FN(gcs_gcomm_register); extern GCS_BACKEND_CREATE_FN(gcs_gcomm_create); #endif /* _gcs_vs_h_ */ galera-4-26.4.25/gcs/src/gcs_backend.hpp000644 000164 177776 00000013647 15107057155 021026 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * This header defines GC backend interface. * Since we can't know backend context in advance, * we have to use type void*. Kind of unsafe. 
*/ #ifndef _gcs_backend_h_ #define _gcs_backend_h_ #include "gcs.hpp" #include "gcs_recv_msg.hpp" #include #include #include typedef struct gcs_backend_conn gcs_backend_conn_t; typedef struct gcs_backend gcs_backend_t; /* * The macros below are declarations of backend functions * (kind of function signatures) */ /*! Registers configuration parameters with config */ #define GCS_BACKEND_REGISTER_FN(fn) \ bool fn (gu_config_t* cnf) /*! Allocates backend context and sets up the backend structure */ #define GCS_BACKEND_CREATE_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* const addr, \ gu_config_t* const cnf) /*! Deallocates backend context */ #define GCS_BACKEND_DESTROY_FN(fn) \ long fn (gcs_backend_t* backend) /*! Puts backend handle into operating state */ #define GCS_BACKEND_OPEN_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* const channel, \ bool const bootstrap) /*! Puts backend handle into non-operating state */ #define GCS_BACKEND_CLOSE_FN(fn) \ long fn (gcs_backend_t* backend) /*! * Send a message from the backend. * * @param backend * a pointer to the backend handle * @param buf * a buffer to copy the message to * @param len * length of the supplied buffer * @param msg_type * type of the message * @return * negative error code in case of error * OR * amount of bytes sent */ #define GCS_BACKEND_SEND_FN(fn) \ long fn (gcs_backend_t* const backend, \ const void* const buf, \ size_t const len, \ gcs_msg_type_t const msg_type) /*! * Receive a message from the backend. 
* * @param backend * a pointer to the backend object * @param buf * a buffer to copy the message to * @param len * length of the supplied buffer * @param msg_type * type of the message * @param sender_id * unique sender ID in this configuration * @param timeout * absolute timeout date in nanoseconds * @return * negative error code in case of error * OR * the length of the message, so if it is bigger * than len, it has to be reread with a bigger buffer */ #define GCS_BACKEND_RECV_FN(fn) \ long fn (gcs_backend_t* const backend, \ gcs_recv_msg_t* const msg, \ long long const timeout) /* for lack of better place define it here */ static const long GCS_SENDER_NONE = -1; /** When there's no sender */ /*! Returns symbolic name of the backend */ #define GCS_BACKEND_NAME_FN(fn) \ const char* fn (void) /*! * Returns the size of the message such that resulting network packet won't * exceed given value (basically, pkt_size - headers). * * @param backend * backend handle * @param pkt_size * desired size of a network packet * @return * - message size coresponding to the desired network packet size OR * - maximum message size the backend supports if requested packet size * is too big OR * - negative amount by which the packet size must be increased in order * to send at least 1 byte. */ #define GCS_BACKEND_MSG_SIZE_FN(fn) \ long fn (gcs_backend_t* const backend, \ long const pkt_size) /*! * @param backend * backend handle * @param key * parameter name * @param value * parameter value * @return 1 if parameter not recognized, 0 in case of success and negative * error code in case of error */ #define GCS_BACKEND_PARAM_SET_FN(fn) \ long fn (gcs_backend_t* backend, \ const char* key, \ const char* value) /*! * @param backend * backend handle * @param key * parameter name * @return NULL if parameter not recognized */ #define GCS_BACKEND_PARAM_GET_FN(fn) \ const char* fn (gcs_backend_t* backend, \ const char* key) /*! 
* @param backend * backend handle * @param status * reference to status variable map */ #define GCS_BACKEND_STATUS_GET_FN(fn) \ void fn(gcs_backend_t* backend, \ gu::Status& status) typedef GCS_BACKEND_CREATE_FN ((*gcs_backend_create_t)); typedef GCS_BACKEND_DESTROY_FN ((*gcs_backend_destroy_t)); typedef GCS_BACKEND_OPEN_FN ((*gcs_backend_open_t)); typedef GCS_BACKEND_CLOSE_FN ((*gcs_backend_close_t)); typedef GCS_BACKEND_SEND_FN ((*gcs_backend_send_t)); typedef GCS_BACKEND_RECV_FN ((*gcs_backend_recv_t)); typedef GCS_BACKEND_NAME_FN ((*gcs_backend_name_t)); typedef GCS_BACKEND_MSG_SIZE_FN ((*gcs_backend_msg_size_t)); typedef GCS_BACKEND_PARAM_SET_FN ((*gcs_backend_param_set_t)); typedef GCS_BACKEND_PARAM_GET_FN ((*gcs_backend_param_get_t)); typedef GCS_BACKEND_STATUS_GET_FN ((*gcs_backend_status_get_t)); struct gcs_backend { gcs_backend_conn_t* conn; gcs_backend_open_t open; gcs_backend_close_t close; gcs_backend_destroy_t destroy; gcs_backend_send_t send; gcs_backend_recv_t recv; gcs_backend_name_t name; gcs_backend_msg_size_t msg_size; gcs_backend_param_set_t param_set; gcs_backend_param_get_t param_get; gcs_backend_status_get_t status_get; }; /*! * Registers backends' parameters with config. */ bool gcs_backend_register(gu_config_t* conf); /*! * Initializes preallocated backend object and opens backend connection * (sort of like 'new') */ long gcs_backend_init (gcs_backend_t* bk, const char* uri, gu_config_t* cnf); #endif /* _gcs_backend_h_ */ galera-4-26.4.25/gcs/src/gcs_gcomm.cpp000644 000164 177776 00000054054 15107057155 020531 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2009-2019 Codership Oy */ /*! * @file GComm GCS Backend implementation * * @todo Figure out if there is lock-free way to handle RecvBuf * push/pop operations. 
* */ #include "gcs_gcomm.hpp" // We access data comp msg struct directly #define GCS_COMP_MSG_ACCESS 1 #include "gcs_comp_msg.hpp" #include #include #include #include #include #include #include #include #include using namespace std; using namespace gu; using namespace gu::datetime; using namespace gcomm; static const std::string gcomm_thread_schedparam_opt("gcomm.thread_prio"); class RecvBufData { public: RecvBufData(const size_t source_idx, const Datagram& dgram, const ProtoUpMeta& um) : source_idx_(source_idx), dgram_ (dgram), um_ (um) { } size_t get_source_idx() const { return source_idx_; } const Datagram& get_dgram() const { return dgram_; } const ProtoUpMeta& get_um() const { return um_; } private: size_t source_idx_; Datagram dgram_; ProtoUpMeta um_; }; #if defined(GALERA_USE_BOOST_POOL_ALLOC) #include typedef deque > #else typedef deque #endif /* GALERA_USE_BOOST_POOL_ALLOC */ RecvBufQueue; class RecvBuf { private: class Waiting { public: Waiting (bool& w) : w_(w) { w_ = true; } ~Waiting() { w_ = false; } private: bool& w_; }; public: RecvBuf() : mutex_(), cond_(), queue_(), waiting_(false) { } void push_back(const RecvBufData& p) { Lock lock(mutex_); queue_.push_back(p); if (waiting_ == true) { cond_.signal(); } } const RecvBufData& front(const Date& timeout) { Lock lock(mutex_); while (queue_.empty()) { Waiting w(waiting_); if (gu_likely (timeout == GU_TIME_ETERNITY)) { lock.wait(cond_); } else { lock.wait(cond_, timeout); } } assert (false == waiting_); return queue_.front(); } void pop_front() { Lock lock(mutex_); assert(queue_.empty() == false); queue_.pop_front(); } private: Mutex mutex_; Cond cond_; RecvBufQueue queue_; bool waiting_; }; class GCommConn : public Toplay { public: GCommConn(const URI& u, gu::Config& cnf) : Toplay(cnf), conf_(cnf), uuid_(), thd_(), schedparam_(conf_.get(gcomm_thread_schedparam_opt)), uri_(u), net_(Protonet::create(conf_)), tp_(0), mutex_(), refcnt_(0), terminated_(false), error_(0), recv_buf_(), current_view_(), 
connect_task_() { log_info << "backend: " << net_->type(); } ~GCommConn() { delete tp_; delete net_; } const gcomm::UUID& get_uuid() const { return uuid_; } void connect(bool) { } void connect(string channel, bool const bootstrap); void close(bool force = false) { if (tp_ == 0) { log_warn << "gcomm: backend already closed"; return; } { gcomm::Critical crit(*net_); log_info << "gcomm: terminating thread"; terminate(); } log_info << "gcomm: joining thread"; gu_thread_join(thd_, 0); { gcomm::Critical crit(*net_); log_info << "gcomm: closing backend"; tp_->close(error_ != 0 || force == true); gcomm::disconnect(tp_, this); delete tp_; tp_ = 0; } log_info << "gcomm: closed"; } void run(); void notify() { net_->interrupt(); } void terminate() { Lock lock(mutex_); terminated_ = true; net_->interrupt(); } void handle_up (const void* id, const Datagram& dg, const ProtoUpMeta& um); RecvBuf& get_recv_buf() { return recv_buf_; } size_t get_mtu() const { if (tp_ == 0) { gu_throw_fatal << "GCommConn::get_mtu(): " << "backend connection not open"; } return tp_->mtu(); } Protonet& get_pnet() { return *net_; } gu::Config& get_conf() { return conf_; } int get_error() const { return error_; } void get_status(gu::Status& status) const { if (tp_ != 0) tp_->get_status(status); } gu::ThreadSchedparam schedparam() const { return schedparam_; } class Ref { public: Ref(gcs_backend_t* ptr, bool unset = false) : conn_(0) { if (ptr->conn != 0) { conn_ = reinterpret_cast(ptr->conn)->ref(unset); if (unset == true) { ptr->conn = 0; } } } ~Ref() { if (conn_ != 0) { conn_->unref(); } } GCommConn* get() { return conn_; } private: Ref(const Ref&); void operator=(const Ref&); GCommConn* conn_; }; private: GCommConn(const GCommConn&); void operator=(const GCommConn&); GCommConn* ref(const bool unsetting) { return this; } void unref() { } void print_connect_diag(const std::string&, bool boostrap) const; gu::Config& conf_; gcomm::UUID uuid_; gu_thread_t thd_; ThreadSchedparam schedparam_; URI uri_; 
Protonet* net_; Transport* tp_; Mutex mutex_; size_t refcnt_; bool terminated_; int error_; RecvBuf recv_buf_; View current_view_; std::packaged_task connect_task_; }; extern "C" void* run_fn(void* arg) { static_cast(arg)->run(); gu_thread_exit(0); } void GCommConn::print_connect_diag(const std::string& channel, bool const bootstrap) const { if (bootstrap) { log_info << "gcomm: bootstrapping new group '" << channel << '\''; } else { string peer; URI::AuthorityList::const_iterator i, i_next; for (i = uri_.get_authority_list().begin(); i != uri_.get_authority_list().end(); ++i) { i_next = i; ++i_next; string host; string port; try { host = i->host(); } catch (NotSet&) { } try { port = i->port(); } catch (NotSet&) { } peer += host != "" ? host + ":" + port : ""; if (i_next != uri_.get_authority_list().end()) { peer += ","; } } log_info << "gcomm: connecting to group '" << channel << "', peer '" << peer << "'"; } } void GCommConn::connect(string channel, bool const bootstrap) { if (tp_ != 0) { gu_throw_fatal << "backend connection already open"; } /* This task is invoked at the very beginning of * run() method. */ connect_task_ = std::packaged_task{ [this, channel, bootstrap]() { gcomm::Critical crit(*net_); uri_.set_option("gmcast.group", channel); tp_ = Transport::create(*net_, uri_); gcomm::connect(tp_, this); print_connect_diag(channel, bootstrap); tp_->connect(bootstrap); uuid_ = tp_->uuid(); error_ = 0; log_info << "gcomm: connected"; } }; auto future = connect_task_.get_future(); error_ = ENOTCONN; int err; if ((err = gu_thread_create( &thd_, 0, run_fn, this)) != 0) { gu_throw_system_error(err) << "Failed to create thread"; } thread_set_schedparam(thd_, schedparam_); log_info << "gcomm thread scheduling priority set to " << thread_get_schedparam(thd_) << " "; /* Will throw if an exception was thrown in connect_task. 
*/ future.get(); } void GCommConn::handle_up(const void* id, const Datagram& dg, const ProtoUpMeta& um) { if (um.err_no() != 0) { error_ = um.err_no(); // force backend close close(true); recv_buf_.push_back(RecvBufData(numeric_limits::max(), dg, um)); } else if (um.has_view() == true) { current_view_ = um.view(); recv_buf_.push_back(RecvBufData(numeric_limits::max(), dg, um)); if (current_view_.is_empty()) { log_debug << "handle_up: self leave"; } } else { size_t idx(0); for (NodeList::const_iterator i = current_view_.members().begin(); i != current_view_.members().end(); ++i) { if (NodeList::key(i) == um.source()) { recv_buf_.push_back(RecvBufData(idx, dg, um)); break; } ++idx; } assert(idx < current_view_.members().size()); } } void GCommConn::run() { connect_task_(); if (error_ != 0) return; while (true) { { Lock lock(mutex_); if (terminated_ == true) { break; } } try { net_->event_loop(Sec); } catch (gu::Exception& e) { log_error << "exception from gcomm, backend must be restarted: " << e.what(); // Commented out due to Backtrace() not producing proper // backtraces. // log_info << "attempting to get backtrace:"; // Backtrace().print(std::cerr); gcomm::Critical crit(get_pnet()); handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, e.get_errno())); break; } #if 0 // Disabled catching unknown exceptions due to Backtrace() not // producing proper backtraces. We let the application crash // and deal with diagnostics. catch (...) 
{ log_error << "unknow exception from gcomm, backend must be restarted"; log_info << "attempting to get backtrace:"; Backtrace().print(std::cerr); gcomm::Critical crit(get_pnet()); handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, gu::Exception::E_UNSPEC)); break; } #endif } } //////////////////////////////////////////////////////////////////////////// // // Backend interface implementation // //////////////////////////////////////////////////////////////////////////// static GCS_BACKEND_MSG_SIZE_FN(gcomm_msg_size) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -1; } return ref.get()->get_mtu(); } static GCS_BACKEND_SEND_FN(gcomm_send) { GCommConn::Ref ref(backend); if (gu_unlikely(ref.get() == 0)) { return -EBADFD; } GCommConn& conn(*ref.get()); Datagram dg( SharedBuffer( new Buffer(reinterpret_cast(buf), reinterpret_cast(buf) + len))); int err; // Set thread scheduling params if gcomm thread runs with // non-default params gu::ThreadSchedparam orig_sp; if (conn.schedparam() != gu::ThreadSchedparam::system_default) { try { orig_sp = gu::thread_get_schedparam(gu_thread_self()); gu::thread_set_schedparam(gu_thread_self(), conn.schedparam()); } catch (gu::Exception& e) { err = e.get_errno(); } } { gcomm::Critical crit(conn.get_pnet()); if (gu_unlikely(conn.get_error() != 0)) { err = ECONNABORTED; } else { err = conn.send_down( dg, ProtoDownMeta(msg_type, msg_type == GCS_MSG_CAUSAL ? O_LOCAL_CAUSAL : O_SAFE)); } } if (conn.schedparam() != gu::ThreadSchedparam::system_default) { try { gu::thread_set_schedparam(gu_thread_self(), orig_sp); } catch (gu::Exception& e) { err = e.get_errno(); } } return (err == 0 ? 
len : -err); } static void fill_cmp_msg(const View& view, const gcomm::UUID& my_uuid, gcs_comp_msg_t* cm) { size_t n(0); for (NodeList::const_iterator i = view.members().begin(); i != view.members().end(); ++i) { const gcomm::UUID& uuid(NodeList::key(i)); log_debug << "member: " << n << " uuid: " << uuid << " segment: " << static_cast(i->second.segment()); // (void)snprintf(cm->memb[n].id, GCS_COMP_MEMB_ID_MAX_LEN, "%s", // uuid._str().c_str()); long ret = gcs_comp_msg_add (cm, uuid.full_str().c_str(), i->second.segment()); if (ret < 0) { gu_throw_error(-ret) << "Failed to add member '" << uuid << "' to component message: " << -ret; } if (uuid == my_uuid) { log_debug << "my index " << n; cm->my_idx = n; } ++n; } } static GCS_BACKEND_RECV_FN(gcomm_recv) { GCommConn::Ref ref(backend); if (gu_unlikely(ref.get() == 0)) return -EBADFD; try { GCommConn& conn(*ref.get()); RecvBuf& recv_buf(conn.get_recv_buf()); const RecvBufData& d(recv_buf.front(timeout)); msg->sender_idx = d.get_source_idx(); const Datagram& dg(d.get_dgram()); const ProtoUpMeta& um(d.get_um()); if (gu_likely(dg.len() != 0)) { assert(dg.len() > dg.offset()); const byte_t* b(gcomm::begin(dg)); const ssize_t pload_len(gcomm::available(dg)); msg->size = pload_len; if (gu_likely(pload_len <= msg->buf_len)) { memcpy(msg->buf, b, pload_len); msg->type = static_cast(um.user_type()); recv_buf.pop_front(); } else { msg->type = GCS_MSG_ERROR; } } else if (um.err_no() != 0) { gcs_comp_msg_t* cm(gcs_comp_msg_leave(ECONNABORTED)); const ssize_t cm_size(gcs_comp_msg_size(cm)); if (cm_size <= msg->buf_len) { memcpy(msg->buf, cm, cm_size); msg->size = cm_size; recv_buf.pop_front(); msg->type = GCS_MSG_COMPONENT; } else { msg->type = GCS_MSG_ERROR; } gcs_comp_msg_delete(cm); } else { assert(um.has_view() == true); const View& view(um.view()); assert(view.type() == V_PRIM || view.type() == V_NON_PRIM); gcs_comp_msg_t* cm(gcs_comp_msg_new(view.type() == V_PRIM, view.is_bootstrap(), view.is_empty() ? 
-1 : 0, view.members().size(), 0)); const ssize_t cm_size(gcs_comp_msg_size(cm)); if (cm->my_idx == -1) { log_debug << "gcomm recv: self leave"; } msg->size = cm_size; if (gu_likely(cm_size <= msg->buf_len)) { fill_cmp_msg(view, conn.get_uuid(), cm); memcpy(msg->buf, cm, cm_size); recv_buf.pop_front(); msg->type = GCS_MSG_COMPONENT; } else { msg->type = GCS_MSG_ERROR; } gcs_comp_msg_delete(cm); } return msg->size; } catch (Exception& e) { long err = e.get_errno(); if (ETIMEDOUT != err) { log_error << e.what(); } return -err; } } static GCS_BACKEND_NAME_FN(gcomm_name) { static const char *name = "gcomm"; return name; } static GCS_BACKEND_OPEN_FN(gcomm_open) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } GCommConn& conn(*ref.get()); try { conn.connect(channel, bootstrap); } catch (Exception& e) { log_error << "failed to open gcomm backend connection: " << e.get_errno() << ": " << e.what(); return -e.get_errno(); } return 0; } static GCS_BACKEND_CLOSE_FN(gcomm_close) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } GCommConn& conn(*ref.get()); try { // Critical section is entered inside close() call. // gcomm::Critical crit(conn.get_pnet()); conn.close(); } catch (Exception& e) { log_error << "failed to close gcomm backend connection: " << e.get_errno() << ": " << e.what(); gcomm::Critical crit(conn.get_pnet()); conn.handle_up(0, Datagram(), ProtoUpMeta(gcomm::UUID::nil(), ViewId(V_NON_PRIM), 0, 0xff, O_DROP, -1, e.get_errno())); // #661: Pretend that closing was successful, backend should be // in unusable state anyway. This allows gcs to finish shutdown // sequence properly. 
} return 0; } static GCS_BACKEND_DESTROY_FN(gcomm_destroy) { GCommConn::Ref ref(backend, true); if (ref.get() == 0) { log_warn << "could not get reference to backend conn"; return -EBADFD; } GCommConn* conn(ref.get()); try { delete conn; } catch (Exception& e) { log_warn << "conn destroy failed: " << e.get_errno(); return -e.get_errno(); } return 0; } static GCS_BACKEND_PARAM_SET_FN(gcomm_param_set) { GCommConn::Ref ref(backend); if (ref.get() == 0) { return -EBADFD; } Protolay::sync_param_cb_t sync_param_cb; GCommConn& conn(*ref.get()); try { gcomm::Critical crit(conn.get_pnet()); if (gu_unlikely(conn.get_error() != 0)) { return -ECONNABORTED; } if (conn.get_pnet().set_param(key, value, sync_param_cb) == false) { log_debug << "param " << key << " not recognized"; return 1; } } catch (gu::Exception& e) { log_warn << "error setting param " << key << " to value " << value << ": " << e.what(); return -e.get_errno(); } catch (gu::NotFound& nf) { log_warn << "error setting param " << key << " to value " << value; return -EINVAL; } catch (gu::NotSet& nf) { log_warn << "error setting param " << key << " to value " << value; return -EINVAL; } catch (...) { log_fatal << "gcomm param set: caught unknown exception"; return -ENOTRECOVERABLE; } if (!sync_param_cb.empty()) { sync_param_cb(); } return 0; } static GCS_BACKEND_PARAM_GET_FN(gcomm_param_get) { return NULL; } static GCS_BACKEND_STATUS_GET_FN(gcomm_status_get) { GCommConn::Ref ref(backend); if (ref.get() == 0) { gu_throw_error(-EBADFD) << "Could not get status from gcomm backend"; } GCommConn& conn(*ref.get()); gcomm::Critical crit(conn.get_pnet()); conn.get_status(status); } GCS_BACKEND_REGISTER_FN(gcs_gcomm_register) { try { reinterpret_cast(cnf)->add(gcomm_thread_schedparam_opt, ""); gcomm::Conf::register_params(*reinterpret_cast(cnf)); return false; } catch (...) 
{ return true; } } GCS_BACKEND_CREATE_FN(gcs_gcomm_create) { GCommConn* conn(0); if (!cnf) { log_error << "Null config object passed to constructor."; return -EINVAL; } try { gu::URI uri(std::string("pc://") + addr); gu::Config& conf(*reinterpret_cast(cnf)); conn = new GCommConn(uri, conf); } catch (Exception& e) { log_error << "failed to create gcomm backend connection: " << e.get_errno() << ": " << e.what(); return -e.get_errno(); } backend->open = gcomm_open; backend->close = gcomm_close; backend->destroy = gcomm_destroy; backend->send = gcomm_send; backend->recv = gcomm_recv; backend->name = gcomm_name; backend->msg_size = gcomm_msg_size; backend->param_set = gcomm_param_set; backend->param_get = gcomm_param_get; backend->status_get = gcomm_status_get; backend->conn = reinterpret_cast(conn); return 0; } galera-4-26.4.25/gcs/src/gcs_node.cpp000644 000164 177776 00000025473 15107057155 020357 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ #include "gcs_node.hpp" #include "gcs_state_msg.hpp" #include #include #include // gu::PrintBase /*! Initialize node context */ void gcs_node_init (gcs_node_t* const node, gcache_t* cache, const char* const id, const char* const name, const char* const inc_addr, int const gcs_proto_ver, int const repl_proto_ver, int const appl_proto_ver, gcs_segment_t const segment, bool const stateless) { assert(strlen(id) > 0); assert(strlen(id) < sizeof(node->id)); memset (node, 0, sizeof (gcs_node_t)); strncpy ((char*)node->id, id, sizeof(node->id) - 1); node->bootstrap = false; node->status = GCS_NODE_STATE_NON_PRIM; node->name = strdup (name ? name : NODE_NO_NAME); node->inc_addr = strdup (inc_addr ? 
inc_addr : NODE_NO_ADDR); node->vote_seqno= GCS_NO_VOTE_SEQNO; gcs_defrag_init (&node->app, cache); // GCS_ACT_WRITESET goes only here gcs_defrag_init (&node->oob, NULL); node->gcs_proto_ver = gcs_proto_ver; node->repl_proto_ver = repl_proto_ver; node->appl_proto_ver = appl_proto_ver; node->segment = segment; node->stateless = stateless; } /*! Reset certain node properties after it joins a different cluster. * Setting to 0 as it is what it is initialized to in gcs_node_init(). */ static void node_reset(gcs_node_t* node) { node->bootstrap = false; node->status = GCS_NODE_STATE_NON_PRIM; node->last_applied = 0; node->vote_seqno= GCS_NO_VOTE_SEQNO; node->vote_res = 0; } /*! Move data from one node object to another */ void gcs_node_move (gcs_node_t* dst, gcs_node_t* src) { if (dst->name) free ((char*)dst->name); if (dst->inc_addr) free ((char*)dst->inc_addr); if (dst->state_msg) gcs_state_msg_destroy ((gcs_state_msg_t*)dst->state_msg); memcpy (dst, src, sizeof (gcs_node_t)); gcs_defrag_forget (&src->app); gcs_defrag_forget (&src->oob); src->name = NULL; src->inc_addr = NULL; src->state_msg = NULL; } /*! Mark node's buffers as reset (local node only) */ void gcs_node_reset_local (gcs_node_t* node) { gcs_defrag_reset (&node->app); gcs_defrag_reset (&node->oob); } /*! Reset node's receive buffers */ void gcs_node_reset (gcs_node_t* node) { gcs_defrag_free (&node->app); gcs_defrag_free (&node->oob); gcs_node_reset_local (node); } /*! Deallocate resources associated with the node object */ void gcs_node_free (gcs_node_t* node) { gcs_node_reset (node); if (node->name) { free ((char*)node->name); // was strdup'ed node->name = NULL; } if (node->inc_addr) { free ((char*)node->inc_addr); // was strdup'ed node->inc_addr = NULL; } if (node->state_msg) { gcs_state_msg_destroy ((gcs_state_msg_t*)node->state_msg); node->state_msg = NULL; } } /*! 
Record state message from the node */ void gcs_node_record_state (gcs_node_t* node, gcs_state_msg_t* state_msg) { if (node->state_msg) { gcs_state_msg_destroy ((gcs_state_msg_t*)node->state_msg); } node->state_msg = state_msg; // copy relevant stuff from state msg into node node->status = gcs_state_msg_current_state (state_msg); // NOTE: it is important that we don't overwrite last_applied from the // state msg yet for compatibility with gcs proto 0. We'll do it in // gcs_node_update_status() after quorum is computed // node->last_applied = gcs_state_msg_last_applied(state_msg); gcs_state_msg_last_vote(state_msg, node->vote_seqno, node->vote_res); gcs_state_msg_get_proto_ver (state_msg, &node->gcs_proto_ver, &node->repl_proto_ver, &node->appl_proto_ver); if (node->name) free ((char*)node->name); node->name = strdup (gcs_state_msg_name (state_msg)); if (node->inc_addr) free ((char*)node->inc_addr); node->inc_addr = strdup (gcs_state_msg_inc_addr (state_msg)); } void gcs_node_set_vote (gcs_node_t* const node, gcs_seqno_t const seqno, int64_t const vote, int const gcs_proto) { assert(0 == vote || seqno >= node->last_applied); assert(seqno > node->vote_seqno); gcs_seqno_t const min_seqno = gcs_proto >= 4 ? node->vote_seqno : std::max(node->last_applied, node->vote_seqno); if (gu_unlikely(seqno <= min_seqno)) { gu_warn ("Received bogus VOTE message: %lld.%0llx, from node %s, " "expected > %lld. Ignoring.", (long long)seqno, (long long)vote, node->id, (long long)min_seqno); /* we should not be here: gcs_group_handle_vote_msg() should have * taken care of it. */ assert(0); } else { node->vote_seqno = seqno; node->vote_res = vote; } } /*! 
Update node status according to quorum decisions */ void gcs_node_update_status (gcs_node_t* node, const gcs_state_quorum_t* quorum) { if (quorum->primary) { const gu_uuid_t* node_group_uuid = gcs_state_msg_group_uuid ( node->state_msg); const gu_uuid_t* quorum_group_uuid = &quorum->group_uuid; // TODO: what to do when quorum.proto is not supported by this node? if (!gu_uuid_compare (node_group_uuid, quorum_group_uuid)) { // node was a part of this group gcs_seqno_t node_act_id = gcs_state_msg_received (node->state_msg); if (node_act_id == quorum->act_id) { const gcs_node_state_t last_prim_state = gcs_state_msg_prim_state (node->state_msg); if (GCS_NODE_STATE_NON_PRIM == last_prim_state) { // the node just joined, but already is up to date: node->status = GCS_NODE_STATE_JOINED; gu_debug ("#281 Setting %s state to %s", node->name, gcs_node_state_to_str(node->status)); } else { // Keep node state from the previous primary comp. node->status = last_prim_state; gu_debug ("#281,#298 Carry over last prim state for %s: %s", node->name, gcs_node_state_to_str(node->status)); } } else { // gap in sequence numbers, needs a snapshot, demote status if (node->status > GCS_NODE_STATE_PRIM) { gu_info("'%s' demoted %s->PRIMARY due to gap in history: " "%" PRId64 " - %" PRId64, node->name, gcs_node_state_to_str(node->status), node_act_id, quorum->act_id); } node->status = GCS_NODE_STATE_PRIM; } if (quorum->gcs_proto_ver >= 2) { node->last_applied = gcs_state_msg_last_applied(node->state_msg); gcs_state_msg_last_vote(node->state_msg, node->vote_seqno, node->vote_res); } assert(node->last_applied >= 0); } else { // node joins completely different group, clear all status if (node->status > GCS_NODE_STATE_PRIM || node->last_applied > 0) { gu_info ("'%s' has a different history, demoted %s->PRIMARY", node->name, gcs_node_state_to_str(node->status)); } if (quorum->gcs_proto_ver >= 6) { node_reset(node); } node->status = GCS_NODE_STATE_PRIM; } switch (node->status) { case 
GCS_NODE_STATE_DONOR: if (quorum->version >= 4) { node->desync_count = gcs_state_msg_get_desync_count(node->state_msg); assert(node->desync_count > 0); } else { node->desync_count = 1; } // fall through case GCS_NODE_STATE_SYNCED: node->count_last_applied = true; break; case GCS_NODE_STATE_JOINED: node->count_last_applied =(gcs_state_msg_flags (node->state_msg) & GCS_STATE_FCLA); break; case GCS_NODE_STATE_PRIM: node->last_applied = 0; node->vote_seqno = GCS_NO_VOTE_SEQNO; node->vote_res = 0; // fall through case GCS_NODE_STATE_JOINER: node->count_last_applied = false; break; case GCS_NODE_STATE_NON_PRIM: case GCS_NODE_STATE_MAX: gu_fatal ("Internal logic error: state %d in " "primary configuration. Aborting.", node->status); abort(); break; } if (GCS_NODE_STATE_DONOR != node->status) { assert(0 ==node->desync_count || GCS_NODE_STATE_PRIM==node->status); node->desync_count = 0; } else { assert(node->desync_count > 0); } } else { /* Probably don't want to change anything here, quorum was a failure * anyway. This could be due to this being transient component, lacking * joined nodes from the configuraiton. May be next component will be * better. * * UPDATE (28.06.2011): as #477 shows, we need some consistency here: */ node->status = GCS_NODE_STATE_NON_PRIM; } /* Clear bootstrap flag so that it does not get carried to * subsequent configuration changes. */ node->bootstrap = false; node->stateless = (gcs_state_msg_flags (node->state_msg) & GCS_STATE_FSTATELESS); } void gcs_node_print(std::ostream& os, const gcs_node_t& node) { os << "ID:\t '" << node.id << "'\n" << "joiner:\t'" << node.joiner << "'\n" << "donor:\t '" << node.donor << "'\n" << "name:\t '" << node.name << "'\n" << "incoming: " << node.inc_addr << '\n' << "last_app: " << node.last_applied << '\n' << "count_la: " << (node.count_last_applied ? 
"YES" : "NO") << '\n' << "vote_seq: " << node.vote_seqno << '\n' << "vote_res: " << gu::PrintBase<>(node.vote_res) << '\n' << "proto(g/r/a): " << node.gcs_proto_ver << '/' << node.repl_proto_ver << '/' << node.appl_proto_ver << '\n' << "status:\t " << gcs_node_state_to_str(node.status) << '\n' << "segment: " << int(node.segment) << '\n' << "bootstrp: " << (node.bootstrap ? "YES" : "NO") << '\n' << "arbitr: " << (node.stateless ? "YES" : "NO"); } galera-4-26.4.25/gcs/src/gcs_core.hpp000644 000164 177776 00000014217 15107057155 020361 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ /* * This header defines generic communication layer * which implements basic open/close/send/receive * functions. Its purpose is to implement all * functionality common to all group communication * uses. Currently this amounts to action * fragmentation/defragmentation and invoking backend * functions. * In the course of development it has become clear * that such fuctionality must be collected in a * separate layer. * Application abstraction layer is based on this one * and uses those functions for its own purposes. */ #ifndef _gcs_core_h_ #define _gcs_core_h_ #include "gcs.hpp" #include "gcs_act.hpp" #include "gcs_act_proto.hpp" #include #include #include #include /* 'static' method to register configuration variables */ extern void gcs_core_register (gu::Config& conf); struct gcs_core; typedef struct gcs_core gcs_core_t; /* * Allocates context resources private to * generic communicaton layer - send/recieve buffers and the like. * @param gcs_proto_ver only for unit tests */ extern gcs_core_t* gcs_core_create (gu::Config& conf, gcache_t* cache, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver, int gcs_proto_ver = GCS_PROTO_MAX); /* initializes action history position from gtid. 
See gcs.hpp */ extern long gcs_core_init (gcs_core_t* core, const gu::GTID& position); /* * gcs_core_open() opens connection * Return values: * zero - success * negative - error code */ extern long gcs_core_open (gcs_core_t* conn, const char* channel, const char* url, bool bootstrap); /* * gcs_core_close() puts connection in a closed state, * cancelling all ongoing calls. * Return values: * zero - success * negative - error code */ extern long gcs_core_close (gcs_core_t* conn); /* * gcs_core_destroy() frees resources allocated by gcs_core_create() * Return values: * zero - success * negative - error code */ extern long gcs_core_destroy (gcs_core_t* conn); /* * gcs_core_send() atomically sends action to group. * * NOT THREAD SAFE! Access should be serialized. * * Return values: * non-negative - amount of action bytes sent (sans headers) * negative - error code * -EAGAIN - operation should be retried * -ENOTCONN - connection to primary component lost * * NOTE: Successful return code here does not guarantee delivery to group. * The real status of action is determined only in gcs_core_recv() call. */ extern ssize_t gcs_core_send (gcs_core_t* core, const struct gu_buf* act, size_t act_size, gcs_act_type_t act_type); /* * gcs_core_recv() blocks until some action is received from group. * * @param repl_buf ptr to replicated action local buffer (NULL otherwise) * @param timeout absolute timeout date (as in pthread_cond_timedwait()) * * Return values: * non-negative - the size of action received * negative - error code * * @retval -ETIMEDOUT means no messages were received until timeout. * * NOTE: Action status (replicated or not) is carried in act_id. E.g. -ENOTCONN * means connection to primary component was lost while sending, * -ERESTART means that action delivery was interrupted and it must be * resent. 
*/ extern ssize_t gcs_core_recv (gcs_core_t* conn, struct gcs_act_rcvd* recv_act, long long timeout); /* group protocol version */ extern int gcs_core_proto_ver (const gcs_core_t* conn); /* Configuration functions */ /* Sets maximum message size to achieve requested network packet size. * In case of failure returns negative error code, in case of success - * resulting message payload size (size of action fragment) */ extern int gcs_core_set_pkt_size (gcs_core_t* conn, int pkt_size); /* sends this node's last applied value to group */ extern int gcs_core_set_last_applied (gcs_core_t* core, const gu::GTID& gtid); /* sends status of the ended snapshot (snapshot gtid or error code) */ extern int gcs_core_send_join (gcs_core_t* core, const gu::GTID& gtid, int code); /* sends SYNC notice, gtid currently has no meaning */ extern int gcs_core_send_sync (gcs_core_t* core, const gu::GTID& gtid); /* sends vote on GTID outcome */ extern int gcs_core_send_vote (gcs_core_t* core, const gu::GTID& gtid, int64_t code, const void* msg, size_t msg_len); /* sends flow control message */ extern ssize_t gcs_core_send_fc (gcs_core_t* core, const void* fc, size_t fc_size); extern long gcs_core_caused(gcs_core_t* core, gu::GTID& gtid); extern int gcs_core_param_set (gcs_core_t* core, const char* key, const char* value); extern const char* gcs_core_param_get (gcs_core_t* core, const char* key); void gcs_core_get_status(gcs_core_t* core, gu::Status& status); void gcs_core_get_protocols(gcs_core_t* core, int& appl, int& repl, int& gcs); #ifdef GCS_CORE_TESTING // things compiled only for unit tests /* gcs_core_send() interface does not allow enough concurrency control to model * various race conditions for unit testing - it is not atomic. 
The functions * below expose gcs_core unit internals solely for the purpose of testing */ #include "gcs_msg_type.hpp" #include "gcs_backend.hpp" extern gcs_backend_t* gcs_core_get_backend (gcs_core_t* core); // switches lock-step mode on/off extern void gcs_core_send_lock_step (gcs_core_t* core, bool enable); // step through action send process (send another fragment). // returns positive number if there was a send thread waiting for it. extern long gcs_core_send_step (gcs_core_t* core, long timeout_ms); extern void gcs_core_set_state_uuid (gcs_core_t* core, const gu_uuid_t* uuid); #include "gcs_group.hpp" extern const gcs_group_t* gcs_core_get_group (const gcs_core_t* core); #include "gcs_fifo_lite.hpp" extern gcs_fifo_lite_t* gcs_core_get_fifo (gcs_core_t* core); #endif /* GCS_CORE_TESTING */ #endif /* _gcs_core_h_ */ galera-4-26.4.25/gcs/src/gcs_error.hpp000644 000164 177776 00000003265 15107057155 020563 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2024 Codership Oy */ /*! @file gcs_error.hpp * * Error code to error string translation according to GCS conventions. */ #ifndef GCS_ERROR_HPP #define GCS_ERROR_HPP /*! * Return an error string associated with a system error code for gcs calls * where the error code does not come from system call. As a fallback, * error string for unhandled error codes are obtained by strerror() * system call. * * This function follows the following conventions for system error * codes for group communication errors: * * EAGAIN - Operation failed temporarily due to group configuration * change or flow control. * ENOTCONN, EPERM - Not in primary component. * ECONNABORTED - Connection was closed while the operation was in progress. * ETIMEDOUT - Operation timed out. * EBADF - Connection was not initialized. * * @param err System error code. * @return Error string describing the error condition. */ const char* gcs_error_str(int err); /*! 
* Return and errorstring associated with a system error code for * state transfer requests. As a fallback, error string for unhandled * error codes are obtained by strerror() system call. * * The function follows the following conventions for system error codes * for state transfer request errors (for details, see donor selection in * gcs_group.cpp): * * EAGAIN - No donors available in suitable state. * EHOSTUNREACH - Requested donor is not avaialble. * EHOSTDOWN - Joiner and donor can't be the same node. * * @param err System error code. * @return Error string describing state transfer error condition. */ const char* gcs_state_transfer_error_str(int err); #endif /* GCS_ERROR_HPP */ galera-4-26.4.25/gcs/src/gcs_group.hpp000644 000164 177776 00000021342 15107057155 020562 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ /* * This header defines node specific context we need to maintain */ #ifndef _gcs_group_h_ #define _gcs_group_h_ #include #include "gcs_gcache.hpp" #include "gcs_node.hpp" #include "gcs_recv_msg.hpp" #include "gcs_seqno.hpp" #include "gcs_state_msg.hpp" #include "gu_unordered.hpp" #include "gu_config.hpp" extern std::string const GCS_STATELESS_KEY; extern std::string const GCS_VOTE_POLICY_KEY; extern uint8_t gcs_group_conf_to_vote_policy(gu::Config& cnf); #include "gu_status.hpp" #include "gu_utils.hpp" typedef enum gcs_group_state { GCS_GROUP_NON_PRIMARY, GCS_GROUP_WAIT_STATE_UUID, GCS_GROUP_WAIT_STATE_MSG, GCS_GROUP_PRIMARY, GCS_GROUP_INCONSISTENT, GCS_GROUP_STATE_MAX } gcs_group_state_t; extern const char* gcs_group_state_str[]; typedef gu::UnorderedMap VoteHistory; struct VoteResult { gcs_seqno_t seqno; int64_t res; }; typedef struct gcs_group { gcache_t* cache; gu::Config& cnf; gcs_seqno_t act_id_; // current(last) action seqno gcs_seqno_t conf_id; // current configuration seqno gu_uuid_t state_uuid; // state exchange id gu_uuid_t group_uuid; // group UUID long num; // number of nodes long my_idx; // my 
index in the group const char* my_name; const char* my_address; gcs_group_state_t state; // group state: PRIMARY | NON_PRIMARY gcs_seqno_t last_applied; // last_applied action group-wide long last_node; // node that last reported commit_cut gcs_seqno_t vote_request_seqno; // last vote request was passed for it VoteResult vote_result; // last vote result VoteHistory vote_history; // history of group votes uint8_t vote_policy; bool frag_reset; // indicate that fragmentation was reset bool stateless; gcs_node_t* nodes; // array of node contexts /* values from the last primary component */ gu_uuid_t prim_uuid; gu_seqno_t prim_seqno; long prim_num; gcs_node_state_t prim_state; int prim_gcs_ver; int prim_repl_ver; int prim_appl_ver; /* max supported protocols */ gcs_proto_t const gcs_proto_ver; int const repl_proto_ver; int const appl_proto_ver; gcs_state_quorum_t quorum; int last_applied_proto_ver; gcs_group(gu::Config& cnf, gcache_t* cache, const char* node_name, ///< can be null const char* inc_addr, ///< can be null gcs_proto_t gcs_proto_ver, int repl_proto_ver, int appl_proto_ver); ~gcs_group(); void get_protocols(int& appl, int& repl, int& gcs) { appl = quorum.appl_proto_ver; repl = quorum.repl_proto_ver; gcs = quorum.gcs_proto_ver; } static void register_params(gu::Config& cnf); } gcs_group_t; /*! * Initialize group action history parameters. See gcs.h */ extern int gcs_group_init_history (gcs_group_t* group, const gu::GTID& position); #ifdef GCS_CORE_TESTING /*! * Free group nodes. Should not be used directly, exposed only for * unit tests. */ extern void group_nodes_free (gcs_group_t* group); #endif // GCS_CORE_TESTING /*! Forget the action if it is not to be delivered */ extern void gcs_group_ignore_action (gcs_group_t* group, struct gcs_act_rcvd* rcvd); /*! * Handles component message - installs new membership, * cleans old one. * * @return * group state in case of success or * negative error code. 
*/ extern gcs_group_state_t gcs_group_handle_comp_msg (gcs_group_t* group, const gcs_comp_msg_t* msg); extern gcs_group_state_t gcs_group_handle_uuid_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); extern gcs_group_state_t gcs_group_handle_state_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); extern gcs_seqno_t gcs_group_handle_last_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); extern VoteResult gcs_group_handle_vote_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 for success, 1 for (success && i_am_sender) * or negative error code */ extern int gcs_group_handle_join_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 for success, 1 for (success && i_am_sender) * or negative error code */ extern int gcs_group_handle_sync_msg (gcs_group_t* group, const gcs_recv_msg_t* msg); /*! @return 0 if request is ignored, request size if it should be passed up */ extern int gcs_group_handle_state_request (gcs_group_t* group, struct gcs_act_rcvd* act); /*! * Handles action message. 
Is called often - therefore, inlined * * @return negative - error code, 0 - continue, positive - complete action */ static inline ssize_t gcs_group_handle_act_msg (gcs_group_t* const group, const gcs_act_frag_t* const frg, const gcs_recv_msg_t* const msg, struct gcs_act_rcvd* const rcvd, bool commonly_supported_version) { int const sender_idx = msg->sender_idx; bool const local = (sender_idx == group->my_idx); ssize_t ret; assert (GCS_MSG_ACTION == msg->type); assert (sender_idx < group->num); assert (frg->act_id > 0); assert (frg->act_size > 0); // clear reset flag if set by own first fragment after reset flag was set group->frag_reset = (group->frag_reset && !(local && 0 == frg->frag_no && GCS_GROUP_PRIMARY == group->state)); ret = gcs_node_handle_act_frag (&group->nodes[sender_idx], frg, &rcvd->act, local); if (ret > 0) { assert (ret == rcvd->act.buf_len); rcvd->act.type = frg->act_type; rcvd->sender_idx = sender_idx; if (gu_likely(GCS_ACT_WRITESET == rcvd->act.type && GCS_GROUP_PRIMARY == group->state && group->nodes[sender_idx].status >= GCS_NODE_STATE_DONOR && !(group->frag_reset && local) && commonly_supported_version)) { /* Common situation - * increment and assign act_id only for totally ordered actions * and only in PRIM (skip messages while in state exchange) */ rcvd->id = ++group->act_id_; } else if (GCS_ACT_WRITESET == rcvd->act.type) { /* Rare situations */ if (local) { /* Let the sender know that it failed */ rcvd->id = -ERESTART; gu_debug("Returning -ERESTART for WRITESET action: group->state" " = %s, sender->status = %s, frag_reset = %s, " "buf = %p", gcs_group_state_str[group->state], gcs_node_state_to_str(group->nodes[sender_idx].status), group->frag_reset ? 
"true" : "false", rcvd->act.buf); } else { /* Just ignore it */ ret = 0; gcs_group_ignore_action (group, rcvd); } } } return ret; } static inline gcs_group_state_t gcs_group_state (const gcs_group_t* group) { return group->state; } static inline bool gcs_group_is_primary (const gcs_group_t* group) { return (GCS_GROUP_PRIMARY == group->state); } static inline int gcs_group_my_idx (const gcs_group_t* group) { return group->my_idx; } /*! * Creates new configuration action * @param group group handle * @param rcvd GCS action object * @param proto protocol version gcs should use for this configuration */ extern ssize_t gcs_group_act_conf (gcs_group_t* group, struct gcs_act_rcvd* rcvd, int* proto); /*! Returns state object for state message */ extern gcs_state_msg_t* gcs_group_get_state (const gcs_group_t* group); /*! * find a donor and return its index, if available. pure function. * @return donor index of negative error code. * -EHOSTUNREACH if no available donor. * -EHOSTDOWN if donor is joiner. * -EAGAIN if no node in proper state. */ extern int gcs_group_find_donor(const gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, int const donor_len, const gu::GTID& ist_gtid); extern int gcs_group_param_set(gcs_group_t& group, const std::string& key, const std::string& val); extern void gcs_group_get_status(const gcs_group_t* group, gu::Status& status); #endif /* _gcs_group_h_ */ galera-4-26.4.25/gcs/src/gcs_priv.hpp000644 000164 177776 00000000412 15107057155 020401 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011 Codership Oy * * $Id$ */ /*! 
* @file gcs_priv.h Global declarations private to GCS */ #ifndef _gcs_priv_h_ #define _gcs_priv_h_ #include "gcs.hpp" #define GCS_DESYNC_REQ "self-desync" #endif /* _gcs_priv_h_ */ galera-4-26.4.25/gcs/src/gcs_gcache.hpp000644 000164 177776 00000001443 15107057155 020640 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011 Codership Oy * * $Id$ */ #ifndef _gcs_gcache_h_ #define _gcs_gcache_h_ #ifndef GCS_FOR_GARB #include #else #ifndef gcache_t struct gcache_st; typedef struct gcache_st gcache_t; #endif #endif #include #include static inline void* gcs_gcache_malloc (gcache_t* gcache, size_t size) { #ifndef GCS_FOR_GARB if (gu_likely(gcache != NULL)) return gcache_malloc (gcache, size); else #endif return ::malloc (size); } static inline void gcs_gcache_free (gcache_t* gcache, const void* buf) { #ifndef GCS_FOR_GARB if (gu_likely (gcache != NULL)) gcache_free (gcache, buf); else #endif ::free (const_cast(buf)); } #endif /* _gcs_gcache_h_ */ galera-4-26.4.25/gcs/src/gcs_group.cpp000644 000164 177776 00000221421 15107057155 020555 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ #include "gcs_group.hpp" #include "gcs_gcache.hpp" #include "gcs_priv.hpp" #include "gcs_code_msg.hpp" #include "gcs_error.hpp" #include #include #include #include #include #include std::string const GCS_STATELESS_KEY("gcs.stateless"); bool const GCS_STATELESS_DEFAULT(false); std::string const GCS_VOTE_POLICY_KEY("gcs.vote_policy"); uint8_t const GCS_VOTE_POLICY_DEFAULT(0); std::string const GCS_CHECK_APPL_PROTO_KEY("gcs.check_appl_proto"); bool const GCS_CHECK_APPL_PROTO_DEFAULT(true); void gcs_group::register_params(gu::Config& cnf) { cnf.add(GCS_STATELESS_KEY, gu::Config::Flag::read_only | gu::Config::Flag::type_bool); cnf.add(GCS_VOTE_POLICY_KEY, gu::Config::Flag::read_only | gu::Config::Flag::type_integer); cnf.add(GCS_CHECK_APPL_PROTO_KEY, std::to_string(GCS_CHECK_APPL_PROTO_DEFAULT), gu::Config::Flag::type_bool); } const char* 
gcs_group_state_str[GCS_GROUP_STATE_MAX] = { "NON_PRIMARY", "WAIT_STATE_UUID", "WAIT_STATE_MSG", "PRIMARY" }; static bool group_conf_stateless_flag(gu::Config& cnf) { return cnf.get(GCS_STATELESS_KEY, GCS_STATELESS_DEFAULT); } static bool group_conf_check_appl_proto(gu::Config& cnf) { return cnf.get(GCS_CHECK_APPL_PROTO_KEY, GCS_CHECK_APPL_PROTO_DEFAULT); } uint8_t gcs_group_conf_to_vote_policy(gu::Config& cnf) { int64_t i(cnf.get(GCS_VOTE_POLICY_KEY, int64_t(GCS_VOTE_POLICY_DEFAULT))); if (i < 0 || i >= std::numeric_limits::max()) { log_warn << "Bogus '" << GCS_VOTE_POLICY_KEY << "' from config: " << i << ". Reverting to default."; // or throw? return GCS_VOTE_POLICY_DEFAULT; } return i; } gcs_group::gcs_group(gu::Config& cnf, gcache_t* cache, const char* node_name, ///< can be null const char* inc_addr, ///< can be null gcs_proto_t gcs_proto_ver, int repl_proto_ver, int appl_proto_ver) : cache (cache), cnf (cnf), act_id_ (GCS_SEQNO_ILL), conf_id (GCS_SEQNO_ILL), state_uuid (GU_UUID_NIL), group_uuid (GU_UUID_NIL), num (0), my_idx (-1), my_name (strdup(node_name ? node_name : NODE_NO_NAME)), my_address (strdup(inc_addr ? 
inc_addr : NODE_NO_ADDR)), state (GCS_GROUP_NON_PRIMARY), last_applied (act_id_), last_node (-1), vote_request_seqno (GCS_NO_VOTE_SEQNO), vote_result ((VoteResult){ GCS_NO_VOTE_SEQNO, 0 }), vote_history (), vote_policy (gcs_group_conf_to_vote_policy(cnf)), frag_reset (true), // just in case stateless (group_conf_stateless_flag(cnf)), nodes (NULL), prim_uuid (GU_UUID_NIL), prim_seqno (GCS_SEQNO_ILL), prim_num (0), prim_state (GCS_NODE_STATE_NON_PRIM), prim_gcs_ver (0), prim_repl_ver (0), prim_appl_ver (0), gcs_proto_ver (gcs_proto_ver), repl_proto_ver(repl_proto_ver), appl_proto_ver(appl_proto_ver), quorum (GCS_QUORUM_NON_PRIMARY), last_applied_proto_ver(-1) {} int gcs_group_init_history (gcs_group_t* group, const gu::GTID& gtid) { bool const negative_seqno(gtid.seqno() < 0); bool const nil_uuid(gtid.uuid() == GU_UUID_NIL); if (negative_seqno && !nil_uuid) { log_error << "Non-nil history UUID with negative seqno makes no sense: " << gtid; return -EINVAL; } else if (!negative_seqno && nil_uuid) { log_error <<"Non-negative state seqno requires non-nil history UUID: " << gtid; return -EINVAL; } group->act_id_ = gtid.seqno(); group->last_applied = group->act_id_; group->group_uuid = gtid.uuid()(); return 0; } /* Initialize nodes array from component message */ static inline gcs_node_t* group_nodes_init (const gcs_group_t* group, const gcs_comp_msg_t* comp) { const long my_idx = gcs_comp_msg_self (comp); const long nodes_num = gcs_comp_msg_num (comp); gcs_node_t* ret = GU_CALLOC (nodes_num, gcs_node_t); long i; if (ret) { for (i = 0; i < nodes_num; i++) { const gcs_comp_memb_t* memb = gcs_comp_msg_member(comp, i); assert(NULL != memb); if (my_idx != i) { gcs_node_init (&ret[i], group->cache, memb->id, NULL, NULL, -1, -1, -1, memb->segment, false); } else { // this node gcs_node_init (&ret[i], group->cache, memb->id, group->my_name, group->my_address, group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, memb->segment, group->stateless); } 
assert(ret[i].last_applied == GCS_SEQNO_NIL); } } else { gu_error ("Could not allocate %ld x %zu bytes", nodes_num, sizeof(gcs_node_t)); } return ret; } /* Free nodes array */ #ifndef GCS_CORE_TESTING static #endif // GCS_CORE_TESTING void group_nodes_free (gcs_group_t* group) { int i; /* cleanup after disappeared members */ for (i = 0; i < group->num; i++) { gcs_node_free (&group->nodes[i]); } if (group->nodes) gu_free (group->nodes); group->nodes = NULL; group->num = 0; group->my_idx = -1; } void gcs_group_free (gcs_group_t* group) { if (group->my_name) free ((char*)group->my_name); if (group->my_address) free ((char*)group->my_address); group_nodes_free (group); } gcs_group::~gcs_group() { gcs_group_free(this); } /* Reset nodes array without breaking the statistics */ static inline void group_nodes_reset (gcs_group_t* group) { int i; /* reset recv_acts at the nodes */ for (i = 0; i < group->num; i++) { if (i != group->my_idx) { gcs_node_reset (&group->nodes[i]); } else { gcs_node_reset_local (&group->nodes[i]); } } group->frag_reset = true; } /*! @return false * if the node is stateless and must not be counted in commit cut */ static inline bool group_count_stateless(const gcs_group_t& group, const gcs_node_t& node) { return (!(group.quorum.gcs_proto_ver > 0 && node.stateless)); } /*! 
@return true if the node should be counted in commit cut calculations */ static inline bool group_count_last_applied(const gcs_group_t& group, const gcs_node_t& node) { return (node.count_last_applied && group_count_stateless(group, node)); } /* Find node with the smallest last_applied */ static inline void group_redo_last_applied (gcs_group_t* group) { /* protocols 2-3-4 had error in commit cut recalculation */ bool const proto_cond(group->quorum.gcs_proto_ver > 4 || group->quorum.gcs_proto_ver < 2); gu_seqno_t last_applied = GU_LLONG_MAX; int last_node = -1; int n; for (n = 0; n < group->num; n++) { const gcs_node_t* const node = &group->nodes[n]; gcs_seqno_t const seqno = node->last_applied; assert( 0 < group->last_applied_proto_ver || -1 == group->last_applied_proto_ver /* for unit tests */); log_debug << "last_last_applied[" << group->nodes[n].name << "]: " << node->id << ", " << node->last_applied << ", count: " << (group_count_last_applied(*group, *node) ? "yes" : "no"); /* NOTE: It is crucial for consistency that last_applied algorithm * is absolutely identical on all nodes. Therefore for the * generality sake and future compatibility we have to assume * non-blocking donor. * GCS_BLOCKING_DONOR should never be defined unless in some * very custom builds. Commenting it out for safety sake. 
*/ #ifndef GCS_BLOCKING_DONOR if (group_count_last_applied(*group, *node) #else if ((GCS_NODE_STATE_SYNCED == node->status) /* ignore donor */ #endif && (seqno <= last_applied)) { #ifndef NDEBUG if (seqno > 0 && seqno < group->last_applied) { log_info << "Node:\n" << *node << "\nattempts to set last_applied to " << seqno << " below the current " << group->last_applied; } #endif /* NDEBUG */ if (proto_cond || seqno >= group->last_applied) { last_applied = seqno; last_node = n; } else if (seqno < group->last_applied) { if (0 != seqno) { log_debug << "Last applied: " << seqno << " at node " << node->id << " is less than group last applied: " << group->last_applied; /* This is a possible situation since we allow for * the non-determinism in the last applied reporting. * Even a synced node can report a slightly lower number * depending on when it decides to report. */ } // the node has not yet reported its last applied } } // extra diagnostic, ignore //else if (!count) { gu_warn("not counting %d", n); } } if (gu_likely (last_node >= 0)) { assert(last_applied < GU_LLONG_MAX); assert(last_applied >= group->last_applied || proto_cond); /* make sure group-wide last applied is monotonically increasing: * newly SYNCED node may temporarily result in lower last_applied. */ if (last_applied > group->last_applied || group->quorum.gcs_proto_ver < 2) { group->last_applied = last_applied; } /* should always be the most lagging node to trigger recalculation ASAP*/ group->last_node = last_node; } log_debug << "final last_applied on " << group->nodes[group->my_idx].name << ": " << group->last_applied; } static void group_go_non_primary (gcs_group_t* group) { if (group->my_idx >= 0) { assert(group->num > 0); assert(group->nodes); group->nodes[group->my_idx].status = GCS_NODE_STATE_NON_PRIM; //@todo: Perhaps the same has to be applied to the rest of the nodes[]? 
} else { assert(-1 == group->my_idx); assert(0 == group->num); assert(NULL == group->nodes); } group->state = GCS_GROUP_NON_PRIMARY; group->conf_id = GCS_SEQNO_ILL; // what else? Do we want to change anything about the node here? } static int group_check_proto_ver(gcs_group_t* group) { assert(group->quorum.primary); // must be called only on primary CC gcs_node_t& node(group->nodes[group->my_idx]); bool fail(false); #define GROUP_CHECK_NODE_PROTO_VER(LEVEL) \ if (node.LEVEL < group->quorum.LEVEL) { \ gu_fatal("Group requested %s: %d, max supported by this node: %d." \ " Upgrade the node before joining this group." \ " Must abort.", \ #LEVEL, group->quorum.LEVEL, node.LEVEL); \ fail = true; \ } GROUP_CHECK_NODE_PROTO_VER(gcs_proto_ver); GROUP_CHECK_NODE_PROTO_VER(repl_proto_ver); if (group_conf_check_appl_proto(group->cnf)) { GROUP_CHECK_NODE_PROTO_VER(appl_proto_ver); } #undef GROUP_CHECK_NODE_PROTO_VER if (fail) return -ENOTRECOVERABLE; return 0; } static const char group_empty_id[GCS_COMP_MEMB_ID_MAX_LEN + 1] = { 0, }; static int group_check_donor (gcs_group_t* group) { gcs_node_state_t const my_state = group->nodes[group->my_idx].status; const char* const donor_id = group->nodes[group->my_idx].donor; if (GCS_NODE_STATE_JOINER == my_state && memcmp (donor_id, group_empty_id, sizeof(group_empty_id))) { long i; for (i = 0; i < group->num; i++) { if (i != group->my_idx && !memcmp (donor_id, group->nodes[i].id, sizeof (group->nodes[i].id))) return 0; } gu_warn ("Donor %s is no longer in the group. State transfer cannot " "be completed, need to abort.", donor_id); return -ENOTRECOVERABLE; } return 0; } /*! Processes state messages and sets group parameters accordingly */ static int group_post_state_exchange (gcs_group_t* group) { const gcs_state_msg_t* states[group->num]; gcs_state_quorum_t* quorum = &group->quorum; bool new_exchange = gu_uuid_compare (&group->state_uuid, &GU_UUID_NIL); long i; /* Collect state messages from nodes. 
*/ /* Looping here every time is suboptimal, but simply counting state messages * is not straightforward too: nodes may disappear, so the final count may * include messages from the disappeared nodes. * Let's put it this way: looping here is reliable and not that expensive.*/ for (i = 0; i < group->num; i++) { states[i] = group->nodes[i].state_msg; if (NULL == states[i] || (new_exchange && gu_uuid_compare (&group->state_uuid, gcs_state_msg_uuid(states[i])))) return 0; // not all states from THIS state exch. received, wait } gu_debug ("STATE EXCHANGE: " GU_UUID_FORMAT " complete.", GU_UUID_ARGS(&group->state_uuid)); gcs_state_msg_get_quorum (states, group->num, quorum); assert(quorum->version >= 2); if (quorum->version >= 0) { if (quorum->version < 2) { group->last_applied_proto_ver = 0; } else { group->last_applied_proto_ver = 1; } } else { gu_fatal ("Negative quorum version: %d", quorum->version); return -ENOTRECOVERABLE; } // Update each node state based on quorum outcome: // is it up to date, does it need SST and stuff for (i = 0; i < group->num; i++) { gcs_node_update_status (&group->nodes[i], quorum); } if (quorum->primary) { // primary configuration if (new_exchange) { // new state exchange happened if (!gu_uuid_compare(&group->group_uuid, &quorum->group_uuid) && group->act_id_ > quorum->act_id) { gu_fatal("Reversing history: %lld -> %lld, this member has " "applied %lld more events than the primary component." "Data loss is possible. 
Must abort.", (long long)group->act_id_, (long long)quorum->act_id, (long long)(group->act_id_ - quorum->act_id)); group->state = GCS_GROUP_INCONSISTENT; return 0; } group->state = GCS_GROUP_PRIMARY; group->act_id_ = quorum->act_id; group->conf_id = quorum->conf_id + 1; group->group_uuid = quorum->group_uuid; group->prim_uuid = group->state_uuid; group->state_uuid = GU_UUID_NIL; if (quorum->gcs_proto_ver == 2) // see below for other versions { /* version 2 was a mistake, but we can't eliminate this code * path for the sake of backward compatibility */ assert(quorum->last_applied >= 0); group->last_applied = quorum->last_applied; } if (quorum->gcs_proto_ver >= 6) { /* removing group->last_applied = quorum->last_applied; * above was a mistake as it allowed last_applied * from the old history survive into the new one. * Reinstating with new protocol version. */ group->last_applied = quorum->last_applied; } } else { // no state exchange happend, processing old state messages assert (GCS_GROUP_PRIMARY == group->state); group->conf_id++; } group->prim_seqno = group->conf_id; group->prim_num = 0; for (i = 0; i < group->num; i++) { group->prim_num += gcs_node_is_joined (group->nodes[i].status); } assert (group->prim_num > 0); #define GROUP_UPDATE_PROTO_VER(LEVEL) \ if (group->prim_##LEVEL##_ver < quorum->LEVEL##_proto_ver) \ group->prim_##LEVEL##_ver = quorum->LEVEL##_proto_ver; GROUP_UPDATE_PROTO_VER(gcs); GROUP_UPDATE_PROTO_VER(repl); GROUP_UPDATE_PROTO_VER(appl); #undef GROUP_UPDATE_PROTO_VER if (quorum->gcs_proto_ver != 2) // see above for version 2 { group_redo_last_applied(group); } } else { // non-primary configuration group_go_non_primary (group); } gu_info ("Quorum results:" "\n\tversion = %u," "\n\tcomponent = %s," "\n\tconf_id = %" PRId64 "," "\n\tmembers = %ld/%ld (joined/total)," "\n\tact_id = %" PRId64 "," "\n\tlast_appl. 
= %" PRId64 "," "\n\tprotocols = %d/%d/%d (gcs/repl/appl)," "\n\tvote policy= %d," "\n\tgroup UUID = " GU_UUID_FORMAT, quorum->version, quorum->primary ? "PRIMARY" : "NON-PRIMARY", quorum->conf_id, group->prim_num, group->num, quorum->act_id, group->last_applied, quorum->gcs_proto_ver, quorum->repl_proto_ver, quorum->appl_proto_ver, int(quorum->vote_policy), GU_UUID_ARGS(&quorum->group_uuid)); if (quorum->primary) { int const err(group_check_proto_ver(group)); if (err) return err; } return group_check_donor(group); } // does basic sanity check of the component message (in response to #145) static int group_check_comp_msg (bool prim, long my_idx, long members) { if (my_idx >= 0) { if (my_idx < members) return 0; } else { if (!prim && (0 == members)) return 0; } gu_fatal ("Malformed component message from backend: " "%s, idx = %ld, members = %ld", prim ? "PRIMARY" : "NON-PRIMARY", my_idx, members); assert (0); return -ENOTRECOVERABLE; } gcs_group_state_t gcs_group_handle_comp_msg (gcs_group_t* group, const gcs_comp_msg_t* comp) { long new_idx, old_idx; gcs_node_t* new_nodes = NULL; ulong new_memb = 0; const bool prim_comp = gcs_comp_msg_primary (comp); const bool bootstrap = gcs_comp_msg_bootstrap(comp); const long new_my_idx = gcs_comp_msg_self (comp); const long new_nodes_num = gcs_comp_msg_num (comp); { int const err(group_check_comp_msg(prim_comp, new_my_idx,new_nodes_num)); if (err) return gcs_group_state_t(err); } if (new_my_idx >= 0) { gu_info ("New COMPONENT: primary = %s, bootstrap = %s, my_idx = %ld, " "memb_num = %ld", prim_comp ? "yes" : "no", bootstrap ? 
"yes" : "no", new_my_idx, new_nodes_num); new_nodes = group_nodes_init (group, comp); if (!new_nodes) { gu_fatal ("Could not allocate memory for %d-node component.", gcs_comp_msg_num (comp)); assert(0); return (gcs_group_state_t)-ENOMEM; } if (GCS_GROUP_PRIMARY == group->state) { gu_debug ("#281: Saving %s over %s", gcs_node_state_to_str(group->nodes[group->my_idx].status), gcs_node_state_to_str(group->prim_state)); group->prim_state = group->nodes[group->my_idx].status; } } else { // Self-leave message gu_info ("New SELF-LEAVE."); assert (0 == new_nodes_num); assert (!prim_comp); } bool my_bootstrap(bootstrap); if (prim_comp) { /* Got PRIMARY COMPONENT - Hooray! */ assert (new_my_idx >= 0); if (group->state == GCS_GROUP_PRIMARY) { /* we come from previous primary configuration, relax */ assert(group->my_idx >= 0); my_bootstrap = group->nodes[group->my_idx].bootstrap; } else if (bootstrap && gu_uuid_compare(&group->group_uuid, &GU_UUID_NIL)) { /* Is there need to initialize something else in this case? 
*/ my_bootstrap = true; } else { const bool first_component = #ifndef GCS_CORE_TESTING (0 == group->num) || bootstrap; #else (0 == group->num); #endif if (1 == new_nodes_num && first_component) { /* bootstrap new configuration */ assert (GCS_GROUP_NON_PRIMARY == group->state); assert ((0 == group->num && -1 == group->my_idx) || /* if first comp was non prim due to group expulsion */ (1 == group->num && 0 == group->my_idx)); // This bootstraps initial primary component for state exchange gu_uuid_generate (&group->prim_uuid, NULL, 0); group->prim_seqno = 0; group->prim_num = 1; group->state = GCS_GROUP_PRIMARY; if (group->act_id_ < 0) { // no history provided: start a new one group->act_id_ = GCS_SEQNO_NIL; gu_uuid_generate (&group->group_uuid, NULL, 0); gu_info ("Starting new group from scratch: " GU_UUID_FORMAT, GU_UUID_ARGS(&group->group_uuid)); } group->last_applied = group->act_id_; assert(group->last_applied >= 0); new_nodes[0].status = GCS_NODE_STATE_JOINED; new_nodes[0].last_applied = group->last_applied; } } } else { group_go_non_primary (group); } /* Remap old node array to new one to preserve action continuity */ for (new_idx = 0; new_idx < new_nodes_num; new_idx++) { /* find member index in old component by unique member id */ for (old_idx = 0; old_idx < group->num; old_idx++) { // just scan through old group if (!strcmp(group->nodes[old_idx].id, new_nodes[new_idx].id)) { /* the node was in previous configuration with us */ /* move node context to new node array */ gcs_node_move (&new_nodes[new_idx], &group->nodes[old_idx]); break; } } /* if wasn't found in new configuration, new member - * need to do state exchange */ new_memb |= (old_idx == group->num); } /* free old nodes array */ group_nodes_free (group); group->my_idx = new_my_idx; group->num = new_nodes_num; group->nodes = new_nodes; assert(group->num > 0 || group->my_idx < 0); assert(group->my_idx >= 0 || group->num == 0); if (group->my_idx >= 0) group->nodes[group->my_idx].bootstrap = 
my_bootstrap; if (gcs_comp_msg_primary(comp) || bootstrap) { /* TODO: for now pretend that we always have new nodes and perform * state exchange because old states can carry outdated node status. * (also protocol voting needs to be redone) * However this means aborting ongoing actions. Find a way to avoid * this extra state exchange. Generate new state messages on behalf * of other nodes? see #238 */ new_memb = true; /* if new nodes joined, reset ongoing actions and state messages */ if (new_memb) { group_nodes_reset (group); group->state = GCS_GROUP_WAIT_STATE_UUID; group->state_uuid = GU_UUID_NIL; // prepare for state exchange } else { if (GCS_GROUP_PRIMARY == group->state) { /* since we don't have any new nodes since last PRIMARY, we skip state exchange */ int const err(group_post_state_exchange(group)); if (err) return gcs_group_state_t(err); } } if (group->quorum.gcs_proto_ver < 2) { // commit cut recomputation should happen only after state exchange group_redo_last_applied (group); } } return group->state; } gcs_group_state_t gcs_group_handle_uuid_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { assert (msg->size == sizeof(gu_uuid_t)); if (GCS_GROUP_WAIT_STATE_UUID == group->state && 0 == msg->sender_idx /* check that it is from the representative */) { gu_uuid_copy(&group->state_uuid, (const gu_uuid_t*)msg->buf); group->state = GCS_GROUP_WAIT_STATE_MSG; } else { gu_warn ("Stray state UUID msg: " GU_UUID_FORMAT " from node %d (%s), current group state %s", GU_UUID_ARGS((gu_uuid_t*)msg->buf), msg->sender_idx, group->nodes[msg->sender_idx].name, gcs_group_state_str[group->state]); } return group->state; } gcs_group_state_t gcs_group_handle_state_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { if (GCS_GROUP_WAIT_STATE_MSG == group->state) { gcs_state_msg_t* state = gcs_state_msg_read (msg->buf, msg->size); if (state) { char state_str[1024]; gcs_state_msg_snprintf(state_str, sizeof(state_str), state); const gu_uuid_t* state_uuid = gcs_state_msg_uuid 
(state); if (!gu_uuid_compare(&group->state_uuid, state_uuid)) { gu_info ("STATE EXCHANGE: got state msg: " GU_UUID_FORMAT " from %d (%s)", GU_UUID_ARGS(state_uuid), msg->sender_idx, gcs_state_msg_name(state)); gu_debug("%s", state_str); gcs_node_record_state (&group->nodes[msg->sender_idx], state); int const err(group_post_state_exchange(group)); if (err) return gcs_group_state_t(err); } else { gu_debug ("STATE EXCHANGE: stray state msg: " GU_UUID_FORMAT " from node %d (%s), current state UUID: " GU_UUID_FORMAT, GU_UUID_ARGS(state_uuid), msg->sender_idx, gcs_state_msg_name(state), GU_UUID_ARGS(&group->state_uuid)); gu_debug ("%s", state_str); gcs_state_msg_destroy (state); } } else { gu_warn ("Could not parse state message from node %d, %s", msg->sender_idx, group->nodes[msg->sender_idx].name); } } return group->state; } /* this is a helper function that takes care of preper interpretation of the * code message depending on the protocol version used. * @return 0 - success, -EMSGSIZE - wrong message size, -EINVAL - wrong group */ int group_unserialize_code_msg(gcs_group_t* group, const gcs_recv_msg_t* msg, gu::GTID& gtid, int64_t& code) { if (gu_likely(group->gcs_proto_ver >= 1 && msg->size >= gcs::core::CodeMsg::serial_size())) { const gcs::core::CodeMsg* const cm (static_cast(msg->buf)); cm->unserialize(gtid, code); if (gu_unlikely(gtid.uuid() != group->group_uuid)) { log_info << gcs_msg_type_string[msg->type] << " message " << *cm << " from another group (" << gtid.uuid() << "). Dropping message."; return -EINVAL; } } else // gcs_seqno_t { if (gu_likely(msg->size == sizeof(gcs_seqno_t))) { gtid.set(gu::gtoh(*(static_cast(msg->buf)))); code = 0; } else { log_warn << "Bogus size for " << gcs_msg_type_string[msg->type] << " message: " << msg->size << " bytes. Dropping message."; return -EMSGSIZE; } } return 0; } /*! 
Returns new last applied value if it has changes, 0 otherwise */ gcs_seqno_t gcs_group_handle_last_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { assert (GCS_MSG_LAST == msg->type); gu::GTID gtid; int64_t code; if (gu_unlikely(group_unserialize_code_msg(group, msg, gtid,code))) return 0; if (gu_unlikely(0 != code)) { log_warn << "Bogus " << gcs_msg_type_string[msg->type] << " message code: " << code <<". Ignored."; assert(0); return 0; } // This assert is too restrictive. It requires application to send // last applied messages while holding TO, otherwise there's a race // between threads. // assert (seqno >= group->last_applied); gcs_node_set_last_applied (&group->nodes[msg->sender_idx], gtid.seqno()); assert(group->nodes[msg->sender_idx].last_applied >= 0); log_debug << "Got last applied " << gtid.seqno() << " from " << msg->sender_idx << " (" << group->nodes[msg->sender_idx].name << "). Last node: " << group->last_node << " (" << (group->last_node >= 0 ? group->nodes[group->last_node].name : " ") << ")"; if (msg->sender_idx == group->last_node && gtid.seqno() > group->last_applied) { /* node that was responsible for the last value, has changed it. * need to recompute it */ gcs_seqno_t old_val = group->last_applied; group_redo_last_applied (group); if (old_val < group->last_applied) { gu_debug ("New COMMIT CUT %lld on %ld after %lld from %d", (long long)group->last_applied, group->my_idx, (long long)gtid.seqno(), msg->sender_idx); return group->last_applied; } } return 0; } /*! 
@return true if the node's vote must be counted */ static inline bool group_count_votes(const gcs_node_t& node) { return (node.count_last_applied && !node.stateless); } /* true if last vote was updated, false if not */ static bool group_recount_votes (gcs_group_t& group) { typedef std::pair VoteEntry; typedef std::map VoteCounts; //we want it consistently sorted typedef VoteCounts::const_iterator VoteCountsIt; bool voting(false); gcs_seqno_t voting_seqno(group.act_id_); for (int n(0); n < group.num; ++n) { const gcs_node_t& node(group.nodes[n]); if (group_count_votes(node) && node.vote_seqno > group.vote_result.seqno) { voting = true; if (node.vote_seqno < voting_seqno) voting_seqno = node.vote_seqno; } } if (!voting) return false; /* this can happen on config. change */ VoteCounts vc; int n_votes(0); int voters(0); for (int n(0); n < group.num; ++n) { gcs_node_t& node(group.nodes[n]); if (group_count_votes(node) || node.last_applied >= voting_seqno) { ++voters; if (node.vote_seqno >= voting_seqno || node.last_applied >= voting_seqno) { ++n_votes; /* If a node has voted on seqno > voting_seqno or * reported last appied on a seqno >= voting_seqno, * then its vote for the voting_seqno is 0 (success) */ uint64_t const vote (node.vote_seqno == voting_seqno ? 
node.vote_res : 0); vc.insert(VoteEntry(vote, 0)).first->second++; } } else { log_debug << "Excluding node from voters: " << node; } } assert(n_votes > 0); gu::GTID const vote_gtid(group.group_uuid, voting_seqno); std::ostringstream diag; diag << "Votes over " << vote_gtid << ":\n"; int max_count(0); int second_max(0); int zero_count(0); uint64_t max_vote(0); #ifndef NDEBUG int counts(0); #endif for (VoteCountsIt it(vc.begin()); it != vc.end(); ++it) { assert(it->second > 0); if (0 == it->first) zero_count = it->second; if (it->second >= max_count) { second_max = max_count; max_vote = it->first; max_count = it->second; } #ifndef NDEBUG counts += it->second; #endif diag << " " << gu::PrintBase<>(it->first) << ": " << std::setfill(' ') << std::setw(3) << it->second << '/' << std::setw(0) << voters << "\n"; } assert(counts == n_votes); assert(zero_count <= max_count); int const missing(voters - n_votes); uint64_t win_vote; if (group.quorum.vote_policy > 0 && zero_count >= int(group.quorum.vote_policy)) { win_vote = 0; } else if ((0 == group.quorum.vote_policy || (zero_count + missing < int(group.quorum.vote_policy))) && /* what is happening here: for zero vote to win it must be >= * than any other vote. Which requires any other vote to be * STRICTLY > in case zero count is the second runner up. Yet * it is sufficient to be >= otherwise. */ (zero_count >= second_max + missing /* zero_count == max_count */|| max_count >= second_max + missing + (zero_count == second_max))) { /* even if received, missing votes won't win over current max */ win_vote = (zero_count >= max_count ? 
0 : max_vote); } else { diag << "Waiting for more votes."; log_info << diag.str(); assert(missing > 0); return false;; } diag << "Winner: " << gu::PrintBase<>(win_vote); log_info << diag.str(); group.vote_result.seqno = voting_seqno; group.vote_result.res = win_vote; const gcs_node_t& this_node(group.nodes[group.my_idx]); if (this_node.vote_seqno < voting_seqno) { // record voting result in the history for later std::pair const val(vote_gtid, win_vote); std::pair const res (group.vote_history.insert(val)); if (false == res.second) { assert(0); res.first->second = group.vote_result.res; } } return true; } VoteResult gcs_group_handle_vote_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { assert (GCS_MSG_VOTE == msg->type); gu::GTID gtid; int64_t code; gcs_node_t& sender(group->nodes[msg->sender_idx]); if (gu_unlikely(group_unserialize_code_msg(group, msg, gtid, code))) { log_warn << "Failed to deserialize vote msg from " << msg->sender_idx << " (" << sender.name << ")"; VoteResult const ret = { GCS_NO_VOTE_SEQNO, 0 }; return ret; } /* If either group-wide vote seqno or last applied are greater than the request seqno, the vote has either happened already or there was no need (i.e. all other members had a success). */ gcs_seqno_t const min_seqno = group->quorum.gcs_proto_ver >= 4 ? std::max(group->last_applied, group->vote_result.seqno) : group->vote_result.seqno; if (gtid.uuid() == group->group_uuid && gtid.seqno() > min_seqno) { const char* const data (gcs::core::CodeMsg::serial_size() < msg->size ? (static_cast(msg->buf) + gcs::core::CodeMsg::serial_size()) : NULL); /* voting on this seqno has not completed yet */ log_info << "Member " << msg->sender_idx << '(' << sender.name << ") " << (code ? "initiates" : "responds to") << " vote on " << gtid << ',' << gu::PrintBase<>(code) << ": " << (code ? (data ? 
data : "(null)") : "Success"); gcs_node_set_vote (&sender, gtid.seqno(), code, group->quorum.gcs_proto_ver); if (group_recount_votes(*group)) { /* What if group->vote_result.seqno < gtid.seqno()? * - that means that there is inconsistency between the sender and * the member who initiated voting on vote_result.seqno. This in turn * means that there will be a configuration change that will trigger * another votes recount, and then another configuration change * - until we reach gtid.senqo() */ if (group->nodes[group->my_idx].vote_seqno >= group->vote_result.seqno) { return group->vote_result; } } else if (gtid.seqno() > group->vote_request_seqno) { group->vote_request_seqno = gtid.seqno(); if (msg->sender_idx != group->my_idx) { VoteResult const ret = { gtid.seqno(), GCS_VOTE_REQUEST }; return ret; } } } else if (msg->sender_idx == group->my_idx) { std::ostringstream msg; msg << "Recovering vote result from history: " << gtid; int64_t result(0); VoteHistory::iterator it(group->vote_history.find(gtid)); if (group->vote_history.end() != it) { result = it->second; group->vote_history.erase(it); msg << ',' << gu::PrintBase<>(result); } else { msg << ": not found"; assert(code < 0); /* by default result is 0, which means success/no voting happened, * and this node is the only inconsistent one. */ } log_info << msg.str(); VoteResult const ret = { gtid.seqno(), result }; return ret; // this should wake up the thread that voted } else if (gtid.seqno() > group->vote_result.seqno) { /* outdated vote from another member, ignore */ log_info << "Outdated vote " << gu::PrintBase<>(code) << " for " << gtid; log_info << "(last group vote was on: " << gu::GTID(group->group_uuid, group->vote_result.seqno) << ',' << gu::PrintBase<>(group->vote_result.res) << ')'; } VoteResult const ret = { GCS_NO_VOTE_SEQNO, 0 }; // no action required return ret; } /*! 
return true if this node is the sender to notify the calling thread of * success */ int gcs_group_handle_join_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { int const sender_idx = msg->sender_idx; gcs_node_t* sender = &group->nodes[sender_idx]; assert (GCS_MSG_JOIN == msg->type); gu::GTID gtid; int64_t code; if (gu_unlikely(group_unserialize_code_msg(group, msg, gtid,code))) return 0; if (GCS_NODE_STATE_DONOR == sender->status || GCS_NODE_STATE_JOINER == sender->status) { int j; gcs_node_t* peer = NULL; const char* peer_id = NULL; const char* peer_name = "left the group"; int peer_idx = -1; bool from_donor = false; const char* st_dir = NULL; // state transfer direction symbol if (GCS_NODE_STATE_DONOR == sender->status) { peer_id = sender->joiner; from_donor = true; st_dir = "to"; assert (group->last_applied_proto_ver >= 0); if (0 == group->last_applied_proto_ver) { /* #454 - we don't switch to JOINED here, * instead going straignt to SYNCED */ } else { assert(sender->count_last_applied); assert(sender->desync_count > 0); sender->desync_count -= 1; if (0 == sender->desync_count) sender->status = GCS_NODE_STATE_JOINED; } } else { peer_id = sender->donor; st_dir = "from"; if (group->quorum.version < 2) { // #591 remove after quorum v1 is phased out sender->status = GCS_NODE_STATE_JOINED; group->prim_num++; } else { if (code >= 0) { sender->status = GCS_NODE_STATE_JOINED; group->prim_num++; } else { sender->status = GCS_NODE_STATE_PRIM; } } } // Try to find peer. for (j = 0; j < group->num; j++) { // #483 if (j == sender_idx) continue; if (!memcmp(peer_id, group->nodes[j].id, sizeof (group->nodes[j].id))) { peer_idx = j; peer = &group->nodes[peer_idx]; peer_name = peer->name; break; } } if (j == group->num && strlen(peer_id)) { /* This can happen if the 'peer' is no longer in group. 
*/ gu_info ("Could not find peer: %s", peer_id); } if (code < 0) { gu_warn ("%d.%d (%s): State transfer %s %d.%d (%s) failed: %s", sender_idx, sender->segment, sender->name, st_dir, peer_idx, peer ? peer->segment : -1, peer_name, gcs_state_transfer_error_str((int)-code)); if (from_donor && peer_idx == group->my_idx && GCS_NODE_STATE_JOINER == group->nodes[peer_idx].status) { // this node will be waiting for SST forever. If it has only // one recv thread there is no (generic) way to wake it up. gu_fatal ("Will never receive state. Need to abort."); // return to core to shutdown the backend before aborting return -ENOTRECOVERABLE; } assert(group->quorum.version >= 2); if (group->quorum.version < 2 && !from_donor && // #591 sender_idx == group->my_idx) { // remove after quorum v1 is phased out gu_fatal ("Failed to receive state. Need to abort."); return -ENOTRECOVERABLE; } } else { if (GCS_NODE_STATE_JOINED == sender->status) { if (sender_idx == peer_idx) { gu_info("Member %d.%d (%s) resyncs itself to group.", sender_idx, sender->segment, sender->name); } else { gu_info("%d.%d (%s): State transfer %s %d.%d (%s) complete.", sender_idx, sender->segment, sender->name, st_dir, peer_idx, peer ? peer->segment : -1, peer_name); } } else { assert(sender->desync_count > 0); return 0; // don't deliver up } } } else { if (GCS_NODE_STATE_PRIM == sender->status) { gu_warn("Rejecting JOIN message from %d.%d (%s): new State Transfer" " required.", sender_idx, sender->segment, sender->name); } else if (GCS_NODE_STATE_SYNCED != sender->status && GCS_NODE_STATE_JOINED != sender->status) { /* According to comments in gcs_join(), sending of JOIN messages * is always allowed when not in JOINER state. This may lead to * duplicate joins of which some can be received in JOINED or * SYNCED state. This is expected, so the warning is not printed if * the state is JOINED or SYNCED, but we'll keep it for other * states to catch possible errors in sender logic. */ gu_warn("Protocol violation. 
JOIN message sender %d.%d (%s) is not " "in state transfer (%s). Message ignored.", sender_idx, sender->segment, sender->name, gcs_node_state_to_str(sender->status)); } return 0; } return (sender_idx == group->my_idx); } /* @return true if this node is sender, false otherwise */ int gcs_group_handle_sync_msg (gcs_group_t* group, const gcs_recv_msg_t* msg) { int const sender_idx = msg->sender_idx; gcs_node_t* sender = &group->nodes[sender_idx]; assert (GCS_MSG_SYNC == msg->type); gu::GTID gtid; int64_t code; if (gu_unlikely(group_unserialize_code_msg(group, msg, gtid,code))) return 0; if (GCS_NODE_STATE_JOINED == sender->status || /* #454 - at this layer we jump directly from DONOR to SYNCED */ (0 == group->last_applied_proto_ver && GCS_NODE_STATE_DONOR == sender->status)) { sender->status = GCS_NODE_STATE_SYNCED; sender->count_last_applied = group_count_stateless(*group, *sender); group_redo_last_applied (group); //from now on this node must be counted gu_info ("Member %d.%d (%s) synced with group.", sender_idx, sender->segment, sender->name); return (sender_idx == group->my_idx); } else { if (GCS_NODE_STATE_SYNCED == sender->status) { gu_debug ("Redundant SYNC message from %d.%d (%s).", sender_idx, sender->segment, sender->name); } else if (GCS_NODE_STATE_DONOR == sender->status) { // this is possible with quick succession of desync()/resync() calls gu_debug ("SYNC message from %d.%d (%s, DONOR). Ignored.", sender_idx, sender->segment, sender->name); } else { gu_warn ("SYNC message from non-JOINED %d.%d (%s, %s). 
Ignored.", sender_idx, sender->segment, sender->name, gcs_node_state_to_str(sender->status)); } /* signal sender that it didn't work */ return -ERESTART * (sender_idx == group->my_idx); } } static inline bool group_node_is_stateful (const gcs_group_t* group, const gcs_node_t* node) { if (group->quorum.version < 3) { return strcmp (node->name, GCS_ARBITRATOR_NAME); } else { return (!node->stateless); } } static int group_find_node_by_state (const gcs_group_t* const group, int const joiner_idx, gcs_node_state_t const status) { gcs_segment_t const segment = group->nodes[joiner_idx].segment; int idx; int donor = -1; bool hnss = false; /* have nodes in the same segment */ for (idx = 0; idx < group->num; idx++) { if (joiner_idx == idx) continue; /* skip joiner */ gcs_node_t* node = &group->nodes[idx]; if (!group_node_is_stateful(group, node)) continue; if (node->status >= status) { donor = idx; /* potential donor */ } if (segment == node->segment) { if (donor == idx) return donor; /* found suitable donor in the * same segment */ if (node->status >= GCS_NODE_STATE_JOINER) hnss = true; } } /* Have not found suitable donor in the same segment. */ if (!hnss && donor >= 0) { if (joiner_idx == group->my_idx) { gu_info ("There are no nodes in the same segment that will ever " "be able to become donors, yet there is a suitable donor " "outside. 
Will use that one."); } return donor; } else { /* wait for a suitable donor to appear in the same segment */ return -EAGAIN; } } static int group_find_node_by_name (const gcs_group_t* const group, int const joiner_idx, const char* const name, int const name_len, gcs_node_state_t const status) { int idx; for (idx = 0; idx < group->num; idx++) { gcs_node_t* node = &group->nodes[idx]; if (!group_node_is_stateful(group, node)) continue; if (!strncmp(node->name, name, name_len)) { if (joiner_idx == idx) { return -EHOSTDOWN; } else if (node->status >= status) { return idx; } else if (node->status >= GCS_NODE_STATE_JOINER) { /* will eventually become SYNCED */ return -EAGAIN; } else { /* technically we could return -EDEADLK here, but as long as * it is not -EAGAIN, it does not matter. If the node is in a * PRIMARY state, it is as good as not found. */ break; } } } return -EHOSTUNREACH; } /* Calls group_find_node_by_name() for each name in comma-separated list, * falls back to group_find_node_by_state() if name (or list) is empty. */ static int group_for_each_donor_in_string (const gcs_group_t* const group, int const str_version, int const joiner_idx, const char* const str, int const str_len, gcs_node_state_t const status) { assert (str != NULL); const char* begin = str; const char* end; int err = -EHOSTDOWN; /* worst error */ /* dangling comma */ bool const dcomma = (str_len && str[str_len-1] == ',' && str_version >= 2); do { end = strchr(begin, ','); int len; if (NULL == end) { len = str_len - (begin - str); } else { len = end - begin; } assert (len >= 0); int idx; if (len > 0) { idx = group_find_node_by_name (group, joiner_idx, begin, len, status); } else { if (err == -EAGAIN && !dcomma) { /* -EAGAIN here means that at least one of the nodes in the * list will be available later, so don't try others. 
* (Proto 1 UPDATE: unless there is a dangling comma) */ idx = err; } else { idx = group_find_node_by_state(group, joiner_idx, status); } } if (idx >= 0) return idx; /* once we hit -EAGAIN, don't try to change error code: this means * that at least one of the nodes in the list will become available. */ if (-EAGAIN != err) err = idx; if (end) begin = end + 1; /* skip comma */ } while (end != NULL); return err; } static gcs_seqno_t group_lowest_cached_seqno(const gcs_group_t* const group) { gcs_seqno_t ret = GCS_SEQNO_ILL; int idx = 0; for (idx = 0; idx < group->num; idx++) { gcs_seqno_t seq = gcs_node_cached(&group->nodes[idx]); if (seq != GCS_SEQNO_ILL) { if (ret == GCS_SEQNO_ILL || seq < ret) { ret = seq; } } } return ret; } static int group_find_ist_donor_by_name (const gcs_group_t* const group, int joiner_idx, const char* name, int name_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { int idx = 0; for (idx = 0; idx < group->num; idx++) { gcs_node_t* node = &group->nodes[idx]; gcs_seqno_t cached = gcs_node_cached(node); if (strncmp(node->name, name, name_len) == 0 && joiner_idx != idx && node->status >= status && cached != GCS_SEQNO_ILL && // ist potentially possible (ist_seqno + 1) >= cached) { return idx; } } return -1; } static int group_find_ist_donor_by_name_in_string ( const gcs_group_t* const group, int joiner_idx, const char* str, int str_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { assert (str != NULL); const char* begin = str; const char* end; gu_debug("ist_seqno[%lld]", (long long)ist_seqno); // return the highest cached seqno node. 
int ret = -1; do { end = strchr(begin, ','); int len = 0; if (end == NULL) { len = str_len - (begin - str); } else { len = end - begin; } assert (len >= 0); if (len == 0) break; int idx = group_find_ist_donor_by_name( group, joiner_idx, begin, len, ist_seqno, status); if (idx >= 0) { if (ret == -1 || gcs_node_cached(&group->nodes[idx]) >= gcs_node_cached(&group->nodes[ret])) { ret = idx; } } if (end) begin = end + 1; } while (end != NULL); if (ret == -1) { gu_debug("not found"); } else { gu_debug("found. name[%s], seqno[%lld]", group->nodes[ret].name, (long long)gcs_node_cached(&group->nodes[ret])); } return ret; } static int group_find_ist_donor_by_state (const gcs_group_t* const group, int joiner_idx, gcs_seqno_t ist_seqno, gcs_node_state_t status) { gcs_node_t* joiner = &group->nodes[joiner_idx]; gcs_segment_t joiner_segment = joiner->segment; // find node who is ist potentially possible. // first highest cached seqno local node. // then highest cached seqno remote node. int idx = 0; int local_idx = -1; int remote_idx = -1; for (idx = 0; idx < group->num; idx++) { if (joiner_idx == idx) continue; gcs_node_t* const node = &group->nodes[idx]; gcs_seqno_t const node_cached = gcs_node_cached(node); if (node->status >= status && group_node_is_stateful(group, node) && node_cached != GCS_SEQNO_ILL && node_cached <= (ist_seqno + 1)) { int* const idx_ptr = (joiner_segment == node->segment) ? &local_idx : &remote_idx; if (*idx_ptr == -1 || node_cached >= gcs_node_cached(&group->nodes[*idx_ptr])) { *idx_ptr = idx; } } } if (local_idx >= 0) { gu_debug("local found. name[%s], seqno[%lld]", group->nodes[local_idx].name, (long long)gcs_node_cached(&group->nodes[local_idx])); return local_idx; } if (remote_idx >= 0) { gu_debug("remote found. 
name[%s], seqno[%lld]", group->nodes[remote_idx].name, (long long)gcs_node_cached(&group->nodes[remote_idx])); return remote_idx; } gu_debug("not found."); return -1; } static int group_find_ist_donor (const gcs_group_t* const group, int str_version, int joiner_idx, const char* str, int str_len, gcs_seqno_t ist_seqno, gcs_node_state_t status) { int idx = -1; gcs_seqno_t conf_seqno = group->quorum.act_id; gcs_seqno_t lowest_cached_seqno = group_lowest_cached_seqno(group); if (lowest_cached_seqno == GCS_SEQNO_ILL) { gu_debug("fallback to sst. lowest_cached_seqno == GCS_SEQNO_ILL"); return -1; } gcs_seqno_t const max_cached_range = conf_seqno - lowest_cached_seqno; gcs_seqno_t safety_gap = max_cached_range >> 7; /* 1.0 / 128 ~= 0.008 */ safety_gap = safety_gap < (1 << 20) ? safety_gap : (1 << 20); /* Be sensible and don't reserve more than 1M */ gcs_seqno_t safe_ist_seqno = lowest_cached_seqno + safety_gap; gu_debug("ist_seqno[%lld], lowest_cached_seqno[%lld]," "conf_seqno[%lld], safe_ist_seqno[%lld]", (long long)ist_seqno, (long long)lowest_cached_seqno, (long long)conf_seqno, (long long)safe_ist_seqno); if (ist_seqno < safe_ist_seqno) { // unsafe to perform ist. gu_debug("fallback to sst. ist_seqno < safe_ist_seqno"); return -1; } if (str_len) { // find ist donor by name. idx = group_find_ist_donor_by_name_in_string( group, joiner_idx, str, str_len, ist_seqno, status); if (idx >= 0) return idx; } // find ist donor by status. idx = group_find_ist_donor_by_state( group, joiner_idx, ist_seqno, status); if (idx >= 0) return idx; return -1; } int gcs_group_find_donor(const gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, int const donor_len, const gu::GTID& ist_gtid) { static gcs_node_state_t const min_donor_state = GCS_NODE_STATE_SYNCED; /* try to find ist donor first. 
if it fails, fallbacks to find sst donor*/ int donor_idx = -1; if (str_version >= 2 && ist_gtid.uuid() == group->group_uuid && ist_gtid.seqno() != GCS_SEQNO_ILL) { // FIXME: check if disabling the assertion and allowing ist_seqno to // equal to GCS_SEQNO_ILL requires protocol upgrade // assert(ist_seqno != GCS_SEQNO_ILL); donor_idx = group_find_ist_donor(group, str_version, joiner_idx, donor_string, donor_len, ist_gtid.seqno(), min_donor_state); } if (donor_idx < 0) { /* if donor_string is empty, it will fallback to find_node_by_state() */ donor_idx = group_for_each_donor_in_string (group, str_version, joiner_idx, donor_string, donor_len, min_donor_state); } return donor_idx; } /*! * Selects and returns the index of state transfer donor, if available. * Updates donor and joiner status if state transfer is possible * * @return * donor index or negative error code: * -EHOSTUNREACH if reqiested donor is not available * -EAGAIN if there were no nodes in the proper state. */ static int group_select_donor (gcs_group_t* group, int const str_version, int const joiner_idx, const char* const donor_string, const gu::GTID& ist_gtid, bool const desync) { static gcs_node_state_t const min_donor_state = GCS_NODE_STATE_SYNCED; int donor_idx; int const donor_len = strlen(donor_string); bool const required_donor = (donor_len > 0); if (desync) { /* sender wants to become "donor" itself */ assert(donor_len > 0); gcs_node_state_t const st(group->nodes[joiner_idx].status); if (st >= min_donor_state || (st >= GCS_NODE_STATE_DONOR && group->quorum.version >= 4)) { donor_idx = joiner_idx; gcs_node_t& donor(group->nodes[donor_idx]); assert(donor.desync_count == 0 || group->quorum.version >= 4); assert(donor.desync_count == 0 || st == GCS_NODE_STATE_DONOR); (void)donor; // keep optimised build happy } else donor_idx = -EAGAIN; } else { donor_idx = gcs_group_find_donor(group, str_version, joiner_idx, donor_string, donor_len, ist_gtid); } if (donor_idx >= 0) { assert(donor_idx != joiner_idx || 
desync); gcs_node_t* const joiner = &group->nodes[joiner_idx]; gcs_node_t* const donor = &group->nodes[donor_idx]; donor->desync_count += 1; if (desync && 1 == donor->desync_count) { gu_info ("Member %d.%d (%s) desyncs itself from group", donor_idx, donor->segment, donor->name); } else if (!desync) { gu_info ("Member %d.%d (%s) requested state transfer from '%s'. " "Selected %d.%d (%s)(%s) as donor.", joiner_idx, joiner->segment, joiner->name, required_donor ? donor_string : "*any*", donor_idx, donor->segment, donor->name, gcs_node_state_to_str(donor->status)); } // reserve donor, confirm joiner (! assignment order is significant !) joiner->status = GCS_NODE_STATE_JOINER; donor->status = GCS_NODE_STATE_DONOR; if (1 == donor->desync_count) { /* SST or first desync */ memcpy (donor->joiner, joiner->id, GCS_COMP_MEMB_ID_MAX_LEN+1); memcpy (joiner->donor, donor->id, GCS_COMP_MEMB_ID_MAX_LEN+1); } else { assert(true == desync); } } else if (-donor_idx == EAGAIN) { /* In case of EAGAIN the failure of selecting the donor is * transient, and donor selection may succeed when the request is * retried by the Joiner. Therefore print info level message * instead of warning. */ gu_info("Member %d.%d (%s) requested state transfer from '%s', " "but it is impossible to select State Transfer donor: %s", joiner_idx, group->nodes[joiner_idx].segment, group->nodes[joiner_idx].name, required_donor ? donor_string : "*any*", gcs_state_transfer_error_str(-donor_idx)); } else { gu_warn("Member %d.%d (%s) requested state transfer from '%s', " "but it is impossible to select State Transfer donor: %s", joiner_idx, group->nodes[joiner_idx].segment, group->nodes[joiner_idx].name, required_donor ? 
donor_string : "*any*", gcs_state_transfer_error_str(-donor_idx)); } return donor_idx; } /* Cleanup ignored state request */ void gcs_group_ignore_action (gcs_group_t* group, struct gcs_act_rcvd* act) { gu_debug("Ignoring action: buf: %p, len: %zd, type: %d, sender: %d, " "seqno: %" PRId64, act->act.buf, act->act.buf_len, act->act.type, act->sender_idx, act->id); if (act->act.type <= GCS_ACT_CCHANGE) { gcs_gcache_free (group->cache, act->act.buf); } act->act.buf = NULL; act->act.buf_len = 0; act->act.type = GCS_ACT_ERROR; act->sender_idx = -1; assert (GCS_SEQNO_ILL == act->id); } static bool group_desync_request (const char* const donor) { return (strlen (GCS_DESYNC_REQ) == strlen(donor) && !strcmp(GCS_DESYNC_REQ, donor)); } /* NOTE: check gcs_request_state_transfer() for sender part. */ /*! Returns 0 if request is ignored, request size if it should be passed up */ int gcs_group_handle_state_request (gcs_group_t* group, struct gcs_act_rcvd* act) { // pass only to sender and to one potential donor const char* const donor_name = (const char*)act->act.buf; size_t const donor_name_len = strlen(donor_name) + 1; int donor_idx = -1; int const joiner_idx = act->sender_idx; const char* joiner_name = group->nodes[joiner_idx].name; gcs_node_state_t joiner_status = group->nodes[joiner_idx].status; bool const desync = group_desync_request (donor_name); gu::GTID ist_gtid; int str_version = 1; // actually it's 0 or 1. 
if (act->act.buf_len > (ssize_t)(donor_name_len + 1) && donor_name[donor_name_len] == 'V') { str_version = (int)donor_name[donor_name_len + 1]; } if (str_version >= 2) { ssize_t const ist_offset(donor_name_len + 2); ssize_t const sst_offset(ist_offset + gu::GTID::serial_size()); try { if (act->act.buf_len < sst_offset) { gu_throw_error(EINVAL) << "Request message too short: " << act->act.buf_len << " < " << sst_offset; } ssize_t const offset (ist_gtid.unserialize(act->act.buf, act->act.buf_len, ist_offset)); if (offset != sst_offset) { gu_throw_error(EINVAL) << "Actual SST offset " << offset << " does not match expected " << sst_offset; } } catch (gu::Exception& e) { if (group->my_idx == joiner_idx) { log_fatal << "Failed to form State Transfer Request: " << e.what(); act->id = -ENOTRECOVERABLE; return act->act.buf_len; } else { log_warn << "Malformed State Transfer Request: " << e.what() << " Ignoring"; gcs_group_ignore_action(group, act); return 0; } } // change act.buf's content to original version. // and it's safe to change act.buf_len ::memmove((char*)act->act.buf + donor_name_len, (char*)act->act.buf + sst_offset, act->act.buf_len - sst_offset); act->act.buf_len -= sst_offset - donor_name_len; } assert (GCS_ACT_STATE_REQ == act->act.type); if (joiner_status != GCS_NODE_STATE_PRIM && !desync) { const char* joiner_status_string = gcs_node_state_to_str(joiner_status); if (group->my_idx == joiner_idx) { if (joiner_status >= GCS_NODE_STATE_JOINED) { gu_warn ("Requesting state transfer while in %s. " "Ignoring.", joiner_status_string); act->id = -ECANCELED; } else { /* The node can't send two STRs in a row */ assert(joiner_status == GCS_NODE_STATE_JOINER); gu_fatal("Requesting state transfer while in %s. " "Internal program error.", joiner_status_string); act->id = -ENOTRECOVERABLE; } return act->act.buf_len; } else { gu_warn ("Member %d.%d (%s) requested state transfer, " "but its state is %s. 
Ignoring.", joiner_idx, group->nodes[joiner_idx].segment, joiner_name, joiner_status_string); gcs_group_ignore_action (group, act); return 0; } } donor_idx = group_select_donor(group, str_version, joiner_idx, donor_name, ist_gtid, desync); assert (donor_idx != joiner_idx || desync || donor_idx < 0); assert (donor_idx == joiner_idx || !desync || donor_idx < 0); if (group->my_idx != joiner_idx && group->my_idx != donor_idx) { // if neither DONOR nor JOINER, ignore request gcs_group_ignore_action (group, act); return 0; } else if (group->my_idx == donor_idx) { act->act.buf_len -= donor_name_len; memmove (*(void**)&act->act.buf, ((char*)act->act.buf) + donor_name_len, act->act.buf_len); // now action starts with request, like it was supplied by application, // see gcs_request_state_transfer() } // Return index of donor (or error) in the seqno field to sender. // It will be used to detect error conditions (no availabale donor, // donor crashed and the like). // This may be ugly, well, any ideas? act->id = donor_idx; return act->act.buf_len; } /* Creates new configuration action */ ssize_t gcs_group_act_conf (gcs_group_t* group, struct gcs_act_rcvd* rcvd, int* gcs_proto_ver) { // if (*gcs_proto_ver < group->quorum.gcs_proto_ver) // *gcs_proto_ver = group->quorum.gcs_proto_ver; // only go up, see #482 // else if (group->quorum.gcs_proto_ver >= 0 && // group->quorum.gcs_proto_ver < *gcs_proto_ver) { // gu_warn ("Refusing GCS protocol version downgrade from %d to %d", // *gcs_proto_ver, group->quorum.gcs_proto_ver); // } // actually we allow gcs protocol version downgrade. // because if message version is inconsistent with gcs protocol version // gcs requires resending message with correct gcs protocol version. 
*gcs_proto_ver = group->quorum.gcs_proto_ver; struct gcs_act_cchange conf; if (GCS_GROUP_PRIMARY == group->state) { if (group->quorum.gcs_proto_ver >= 1) { ++group->act_id_; if (group_recount_votes(*group)) { conf.vote_seqno = group->vote_result.seqno; conf.vote_res = group->vote_result.res; } } } else { assert(GCS_GROUP_NON_PRIMARY == group->state); } conf.seqno = group->act_id_; conf.conf_id = group->conf_id; conf.repl_proto_ver = group->quorum.repl_proto_ver; conf.appl_proto_ver = group->quorum.appl_proto_ver; memcpy (conf.uuid.data, &group->group_uuid, sizeof (gu_uuid_t)); if (group->num) { assert (group->my_idx >= 0); for (int idx = 0; idx < group->num; ++idx) { gcs_act_cchange::member m; gu_uuid_scan(group->nodes[idx].id, strlen(group->nodes[idx].id), &m.uuid_); m.name_ = group->nodes[idx].name; m.incoming_ = group->nodes[idx].inc_addr; m.cached_ = gcs_node_cached(&group->nodes[idx]); m.state_ = group->nodes[idx].status; conf.memb.push_back(m); } } else { // self leave message assert (conf.conf_id < 0); assert (-1 == group->my_idx); } void* tmp; rcvd->act.buf_len = conf.write(&tmp); // throws when fails #ifndef GCS_FOR_GARB /* copy CC event to gcache for IST */ rcvd->act.buf = gcache_malloc(group->cache, rcvd->act.buf_len); if (rcvd->act.buf) { memcpy(const_cast(rcvd->act.buf), tmp, rcvd->act.buf_len); rcvd->id = group->my_idx; // passing own index in seqno_g } else { rcvd->act.buf_len = -ENOMEM; rcvd->id = -ENOMEM; } free(tmp); #else rcvd->act.buf = tmp; rcvd->id = group->my_idx; #endif /* GCS_FOR_GARB */ rcvd->act.type = GCS_ACT_CCHANGE; return rcvd->act.buf_len; } // for future use in fake state exchange (in unit tests et.al. 
See #237, #238) static gcs_state_msg_t* group_get_node_state (const gcs_group_t* const group, long const node_idx) { const gcs_node_t* const node = &group->nodes[node_idx]; uint8_t flags = 0; if (0 == node_idx) flags |= GCS_STATE_FREP; if (node->count_last_applied) flags |= GCS_STATE_FCLA; if (node->bootstrap) flags |= GCS_STATE_FBOOTSTRAP; if (node->stateless) flags |= GCS_STATE_FSTATELESS; #ifdef GCS_FOR_GARB int64_t const cached = GCS_SEQNO_ILL; #else int64_t const cached = /* group->cache check is needed for unit tests */ group->cache ? gcache_seqno_min(group->cache) : GCS_SEQNO_ILL; #endif /* GCS_FOR_GARB */ return gcs_state_msg_create ( &group->state_uuid, &group->group_uuid, &group->prim_uuid, group->prim_seqno, group->act_id_, cached, group->last_applied, // should be the same global property as act_id_ node->vote_seqno, node->vote_res, group->vote_policy, group->prim_num, group->prim_state, node->status, node->name, node->inc_addr, node->gcs_proto_ver, node->repl_proto_ver, node->appl_proto_ver, group->prim_gcs_ver, group->prim_repl_ver, group->prim_appl_ver, node->desync_count, flags ); } /*! Returns state message object for this node */ gcs_state_msg_t* gcs_group_get_state (const gcs_group_t* group) { return group_get_node_state (group, group->my_idx); } int gcs_group_param_set(gcs_group_t& group, const std::string& key, const std::string& val) { if (GCS_VOTE_POLICY_KEY == key) { gu_throw_error(ENOTSUP) << "Setting '" << key << "' in runtime may " "have unintended consequences and is currently not supported. 
" "Cluster voting policy should be decided on before starting the " "cluster."; } if (GCS_STATELESS_KEY == key) { gu_throw_error(ENOTSUP) << "Setting '" << key << "' in runtime may " "have unintended consequences and is currently not supported."; } if (GCS_CHECK_APPL_PROTO_KEY == key) { group.cnf.set(key, val); // in case of error throws like above return 0; } return 1; } void gcs_group_get_status (const gcs_group_t* group, gu::Status& status) { int desync_count; // make sure it is not initialized if (gu_likely(group->my_idx >= 0)) { const gcs_node_t& this_node(group->nodes[group->my_idx]); desync_count = this_node.desync_count; } else { desync_count = 0; } status.insert("desync_count", gu::to_string(desync_count)); } galera-4-26.4.25/gcs/src/gcs_act_proto.cpp000644 000164 177776 00000007646 15107057155 021426 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to action protocol * (to be extended to support protocol versions, currently supports only v0) */ #include #include "gcs_act_proto.hpp" /* Version 0 header structure bytes: 00 01 07 08 11 12 15 16 19 20 +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--- |PV| act_id | act_size | frag_no |AT|reserved| data... +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--- PV - protocol version AT - action type */ static const size_t PROTO_PV_OFFSET = 0; static const size_t PROTO_AT_OFFSET = 16; static const size_t PROTO_DATA_OFFSET = 20; // static const size_t PROTO_ACT_ID_OFFSET = 0; // static const size_t PROTO_ACT_SIZE_OFFSET = 8; // static const size_t PROTO_FRAG_NO_OFFSET = 12; // static const gcs_seqno_t PROTO_ACT_ID_MAX = 0x00FFFFFFFFFFFFLL; // static const unsigned int PROTO_FRAG_NO_MAX = 0xFFFFFFFF; // static const unsigned char PROTO_AT_MAX = 0xFF; #define PROTO_MAX_HDR_SIZE PROTO_DATA_OFFSET // for now /*! Writes header data into actual header of the message. 
* Remainig fragment buf and length is in frag->frag and frag->frag_len * * @return 0 on success */ long gcs_act_proto_write (gcs_act_frag_t* frag, void* buf, size_t buf_len) { #ifdef GCS_DEBUG_PROTO if ((frag->act_id > PROTO_ACT_ID_MAX) || (frag->act_size > GCS_MAX_ACT_SIZE) || (frag->frag_no > PROTO_FRAG_NO_MAX) || (frag->act_type > PROTO_AT_MAX)) { gu_error ("Exceeded protocol limits: %d(%d), %d(%d), %d(%d), %d(%d)", frag->act_id, PROTO_ACT_ID_MAX, frag->act_size, GCS_MAX_ACT_SIZE, frag->frag_no, PROTO_FRAG_NO_MAX, frag->act_type, PROTO_AT_MAX); return -EOVERFLOW; } if (frag->proto_ver > GCS_PROTO_MAX) return -EPROTO; if (buf_len < PROTO_DATA_OFFSET) return -EMSGSIZE; #endif // assert (frag->act_size <= PROTO_ACT_SIZE_MAX); ((uint64_t*)buf)[0] = gu_be64(frag->act_id); ((uint32_t*)buf)[2] = htogl ((uint32_t)frag->act_size); ((uint32_t*)buf)[3] = htogl (frag->frag_no); ((uint8_t *)buf)[PROTO_PV_OFFSET] = frag->proto_ver; ((uint8_t *)buf)[PROTO_AT_OFFSET] = frag->act_type; frag->frag = (uint8_t*)buf + PROTO_DATA_OFFSET; frag->frag_len = buf_len - PROTO_DATA_OFFSET; return 0; } /*! 
Reads header data from the actual header of the message * Remainig fragment buf and length is in frag->frag and frag->frag_len * * @return 0 on success */ long gcs_act_proto_read (gcs_act_frag_t* frag, const void* buf, size_t buf_len) { frag->proto_ver = ((uint8_t*)buf)[PROTO_PV_OFFSET]; if (gu_unlikely(buf_len < PROTO_DATA_OFFSET)) { gu_error ("Action message too short: %zu, expected at least %zu", buf_len, PROTO_DATA_OFFSET); return -EBADMSG; } if (gu_unlikely(frag->proto_ver > GCS_PROTO_MAX)) { gu_error ("Bad protocol version %d, maximum supported %d", frag->proto_ver, GCS_PROTO_MAX); return -EPROTO; // this fragment should be dropped } ((uint8_t*)buf)[PROTO_PV_OFFSET] = 0x0; frag->act_id = gu_be64(*(uint64_t*)buf); frag->act_size = gtohl (((uint32_t*)buf)[2]); frag->frag_no = gtohl (((uint32_t*)buf)[3]); frag->act_type = static_cast( ((uint8_t*)buf)[PROTO_AT_OFFSET]); frag->frag = ((uint8_t*)buf) + PROTO_DATA_OFFSET; frag->frag_len = buf_len - PROTO_DATA_OFFSET; /* return 0 or -EMSGSIZE */ return ((frag->act_size > GCS_MAX_ACT_SIZE) * -EMSGSIZE); } /*! Returns protocol header size */ long gcs_act_proto_hdr_size (long version) { if (gu_unlikely(GCS_PROTO_MAX < version)) return -EPROTONOSUPPORT; if (gu_unlikely(version < 0)) return PROTO_MAX_HDR_SIZE; // safe return PROTO_DATA_OFFSET; } galera-4-26.4.25/gcs/src/gcs_defrag.cpp000644 000164 177776 00000015722 15107057155 020656 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2024 Codership Oy * * $Id$ */ #include "gcs_defrag.hpp" #include #include #include #include #define DF_ALLOC() \ do { \ df->head = static_cast(gcs_gcache_malloc(df->cache,df->size));\ \ if(gu_likely(df->head != NULL)) \ df->tail = df->head; \ else { \ gu_error ("Could not allocate memory for new " \ "action of size: %zd", df->size); \ return -ENOMEM; \ } \ } while (0) /*! 
* Handle action fragment * * Unless a whole action is returned, contents of act is undefined * * In order to optimize branch prediction used gu_likely macros and odered and * nested if/else blocks according to branch probability. * * @return 0 - success, * size of action - success, full action received, * negative - error. * * TODO: this function is too long, figure out a way to factor it into several * smaller ones. Note that it is called for every GCS_MSG_ACTION message * so it should be optimal. */ ssize_t gcs_defrag_handle_frag (gcs_defrag_t* df, const gcs_act_frag_t* frg, struct gcs_act* act, bool local) { if (df->received) { /* another fragment of existing action */ df->frag_no++; /* detect possible error condition */ if (gu_unlikely((df->sent_id != frg->act_id) || (df->frag_no != frg->frag_no))) { if (local && df->reset && (df->sent_id == frg->act_id) && (0 == frg->frag_no)) { /* df->sent_id was aborted halfway and is being taken care of * by the sender thread. Forget about it. * Reinit counters and continue with the new action. */ gu_debug("Local action %" PRId64 ", size %zu reset.", frg->act_id, frg->act_size); df->frag_no = 0; df->received = 0; df->tail = df->head; df->reset = false; if (df->size != frg->act_size) { df->size = frg->act_size; #ifndef GCS_FOR_GARB if (df->cache !=NULL) { gcache_free (df->cache, df->head); } else { free ((void*)df->head); } DF_ALLOC(); #endif /* GCS_FOR_GARB */ } } else if (frg->act_id == df->sent_id && frg->frag_no < df->frag_no) { /* gh172: tolerate duplicate fragments in production. */ gu_warn("Duplicate fragment %" PRId64 ":%ld, expected %" PRId64 ":%ld. " "Skipping.", frg->act_id, frg->frag_no, df->sent_id, df->frag_no); df->frag_no--; // revert counter in hope that we get good frag assert(0); return 0; } else { gu_error ("Unordered fragment received. 
Protocol error."); gu_error("Expected: %" PRId64 ":%ld, received: %" PRId64 ":%ld", df->sent_id, df->frag_no, frg->act_id, frg->frag_no); gu_error("Contents: '%.*s'", static_cast(frg->frag_len), (char*)frg->frag); df->frag_no--; // revert counter in hope that we get good frag assert(0); return -EPROTO; } } } else { /* new action */ if (gu_likely(0 == frg->frag_no)) { df->size = frg->act_size; df->sent_id = frg->act_id; df->reset = false; #ifndef GCS_FOR_GARB DF_ALLOC(); #else /* we don't store actions locally at all */ df->head = NULL; df->tail = df->head; #endif } else { /* not a first fragment */ if (!local && df->reset) { /* can happen after configuration change, just ignore this message calmly */ gu_debug("Ignoring fragment %" PRId64 ":%ld (size %zu) after reset", frg->act_id, frg->frag_no, frg->act_size); return 0; } else { ((char*)frg->frag)[frg->frag_len - 1] = '\0'; gu_error ("Unordered fragment received. Protocol error."); gu_error("Expected: any:0(first), received: %" PRId64 ":%lu", frg->act_id, frg->frag_no); gu_error("Contents: '%s', local: %s, reset: %s", (char*)frg->frag, local ? "yes" : "no", df->reset ? "yes" : "no"); assert(0); return -EPROTO; } } } df->received += frg->frag_len; assert (df->received <= df->size); #ifndef GCS_FOR_GARB assert (df->tail); memcpy (df->tail, frg->frag, frg->frag_len); df->tail += frg->frag_len; #else /* we skip memcpy since have not allocated any buffer */ assert (NULL == df->tail); assert (NULL == df->head); #endif #if 1 if (df->received == df->size) { act->buf = df->head; act->buf_len = df->received; gcs_defrag_init (df, df->cache); return act->buf_len; } else { return 0; } #else /* Refs gh185. Above original logic is preserved which relies on resetting * group->frag_reset when local action needs to be resent. However a proper * solution seems to be to use reset flag of own defrag channel (at least * it is per channel, not global like group->frag_reset). This proper logic * is shown below. 
Note that for it to work gcs_group_handle_act_msg() * must be able to handle -ERESTART return code. */ int ret; if (df->received == df->size) { act->buf = df->head; act->buf_len = df->received; if (gu_likely(!df->reset)) { ret = act->buf_len; } else { /* foreign action should simply never get here, only local actions * are allowed to complete in reset state (to return -ERESTART) to * a sending thread. */ assert(local); ret = -ERESTART; } gcs_defrag_init (df, df->cache); // this also clears df->reset flag assert(!df->reset); } else { ret = 0; } return ret; #endif } galera-4-26.4.25/gcs/src/gcs_comp_msg.cpp000644 000164 177776 00000007325 15107057155 021232 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2013 Codership Oy * * $Id$ */ /* * Interface to membership messages - implementation * */ #include #include #include #include #define GCS_COMP_MSG_ACCESS #include "gcs_comp_msg.hpp" static inline int comp_msg_size (int memb_num) { return (sizeof(gcs_comp_msg_t) + memb_num * sizeof(gcs_comp_memb_t)); } /*! Allocates membership object and zeroes it */ gcs_comp_msg_t* gcs_comp_msg_new (bool prim, bool bootstrap, int my_idx, int memb_num, int error) { gcs_comp_msg_t* ret; assert ((memb_num > 0 && my_idx >= 0) || (memb_num == 0 && my_idx == -1)); ret = static_cast(gu_calloc (1, comp_msg_size(memb_num))); if (NULL != ret) { ret->primary = prim; ret->bootstrap = bootstrap; ret->my_idx = my_idx; ret->memb_num = memb_num; ret->error = error; } return ret; } gcs_comp_msg_t* gcs_comp_msg_leave (int error) { return gcs_comp_msg_new (false, false, -1, 0, error); } /*! Destroys component message */ void gcs_comp_msg_delete (gcs_comp_msg_t* comp) { gu_free (comp); } /*! Returns total size of the component message */ int gcs_comp_msg_size (const gcs_comp_msg_t* comp) { assert (comp); return comp_msg_size (comp->memb_num); } /*! 
Adds a member to the component message * Returns an index of the member or negative error code */ int gcs_comp_msg_add (gcs_comp_msg_t* comp, const char* id, gcs_segment_t const segment) { size_t id_len; int i; assert (comp); assert (id); /* check id length */ id_len = strlen (id); if (!id_len) return -EINVAL; if (id_len > GCS_COMP_MEMB_ID_MAX_LEN) return -ENAMETOOLONG; int free_slot = -1; /* find the free id slot and check for id uniqueness */ for (i = 0; i < comp->memb_num; i++) { if (0 == comp->memb[i].id[0] && free_slot < 0) free_slot = i; if (0 == strcmp (comp->memb[i].id, id)) return -ENOTUNIQ; } if (free_slot < 0) return -1; memcpy (comp->memb[free_slot].id, id, id_len); comp->memb[free_slot].segment = segment; return free_slot; } /*! Creates a copy of the component message */ gcs_comp_msg_t* gcs_comp_msg_copy (const gcs_comp_msg_t* comp) { size_t size = gcs_comp_msg_size(comp); gcs_comp_msg_t* ret = static_cast(gu_malloc (size)); if (ret) memcpy (ret, comp, size); return ret; } /*! Returns member ID by index, NULL if none */ const gcs_comp_memb_t* gcs_comp_msg_member (const gcs_comp_msg_t* comp, int idx) { if (0 <= idx && idx < comp->memb_num) return &comp->memb[idx]; else return NULL; } /*! Returns member index by ID, -1 if none */ int gcs_comp_msg_idx (const gcs_comp_msg_t* comp, const char* id) { size_t id_len = strlen(id); int idx = comp->memb_num; if (id_len > 0 && id_len <= GCS_COMP_MEMB_ID_MAX_LEN) for (idx = 0; idx < comp->memb_num; idx++) if (0 == strcmp (comp->memb[idx].id, id)) break; if (comp->memb_num == idx) return -1; else return idx; } /*! Returns primary status of the component */ bool gcs_comp_msg_primary (const gcs_comp_msg_t* comp) { return comp->primary; } /*! Retruns bootstrap flag of the component */ bool gcs_comp_msg_bootstrap(const gcs_comp_msg_t* comp) { return comp->bootstrap; } /*! Returns our own index in the membership */ int gcs_comp_msg_self (const gcs_comp_msg_t* comp) { return comp->my_idx; } /*! 
Returns number of members in the component */ int gcs_comp_msg_num (const gcs_comp_msg_t* comp) { return comp->memb_num; } int gcs_comp_msg_error(const gcs_comp_msg_t* comp) { return comp->error; } galera-4-26.4.25/gcs/src/gcs_act_proto.hpp000644 000164 177776 00000004437 15107057155 021426 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2019 Codership Oy * * $Id$ */ /* * Interface to action protocol * (to be extended to support protocol versions, currently supports only v0) */ #ifndef _gcs_act_proto_h_ #define _gcs_act_proto_h_ #include "gcs.hpp" // for gcs_seqno_t #include #include typedef uint8_t gcs_proto_t; /*! Supported protocol range * * 0 - initial version * 1 - support for totally ordered CC events * 2 - support for commit cut in state exchange msg * 3 - fix for commit cut tracking issue * (needs protocol version bump to keep it identical on all nodes) * 4 - fix for the error voting protocol * (must keep it identical on all nodes) * 5 - fix for commit cut tracking for just SYNCED nodes * (must keep it identical on all nodes) * 6 - fix for commit cut "infection" from a node with different history * (must keep it identical on all nodes) */ #define GCS_PROTO_MAX 6 /*! Internal action fragment data representation */ typedef struct gcs_act_frag { gcs_seqno_t act_id; size_t act_size; const void* frag; // shall override it only once size_t frag_len; unsigned long frag_no; gcs_act_type_t act_type; int proto_ver; } gcs_act_frag_t; /*! Writes header data into actual header of the message. * Remainig fragment buf and length is in frag->frag and frag->frag_len */ extern long gcs_act_proto_write (gcs_act_frag_t* frag, void* buf, size_t buf_len); /*! Reads header data from the actual header of the message * Remainig fragment buf and length is in frag->frag and frag->frag_len */ extern long gcs_act_proto_read (gcs_act_frag_t* frag, const void* buf, size_t buf_len); /*! Increments fragment counter when action remains the same. 
* * @return non-negative counter value on success */ static inline long gcs_act_proto_inc (void* buf) { uint32_t frag_no = gtohl(((uint32_t*)buf)[3]) + 1; #ifdef GCS_DEBUG_PROTO if (!frag_no) return -EOVERFLOW; #endif ((uint32_t*)buf)[3] = htogl(frag_no); return frag_no; } /*! Returns protocol header size */ extern long gcs_act_proto_hdr_size (long version); /*! Returns message protocol version */ static inline int gcs_act_proto_ver (void* buf) { return *((uint8_t*)buf); } #endif /* _gcs_act_proto_h_ */ galera-4-26.4.25/gcs/src/SConscript000644 000164 177776 00000006247 15107057155 020102 0ustar00jenkinsnogroup000000 000000 # Import('env') # Clone environment as we need to tune compilation flags libgcs_env = env.Clone() # Include paths libgcs_env.Append(CPPPATH = Split(''' #/common #/galerautils/src #/gcomm/src #/gcache/src ''')) # Backends (TODO: Get from global options) libgcs_env.Append(CPPFLAGS = ' -DGCS_USE_GCOMM') # For C-style logging libgcs_env.Append(CPPFLAGS = ' -DGALERA_LOG_H_ENABLE_CXX') # Disable old style cast warns until code is fixed libgcs_env.Replace(CXXFLAGS = libgcs_env['CXXFLAGS'].replace('-Wold-style-cast', '')) libgcs_env.Replace(CXXFLAGS = libgcs_env['CXXFLAGS'].replace('-Weffc++', '')) # Allow zero sized arrays libgcs_env.Replace(CCFLAGS = libgcs_env['CCFLAGS'].replace('-pedantic', '')) libgcs_env.Append(CCFLAGS = ' -Wno-missing-field-initializers') libgcs_env.Append(CCFLAGS = ' -Wno-variadic-macros') print('gcs flags:') for f in ['CFLAGS', 'CXXFLAGS', 'CCFLAGS', 'CPPFLAGS']: print(f + ': ' + libgcs_env[f].strip()) gcs4garb_env = libgcs_env.Clone() libgcs_sources = Split(''' gcs_params.cpp gcs_conf.cpp gcs_fifo_lite.cpp gcs_msg_type.cpp gcs_comp_msg.cpp gcs_sm.cpp gcs_backend.cpp gcs_dummy.cpp gcs_act_proto.cpp gcs_defrag.cpp gcs_state_msg.cpp gcs_node.cpp gcs_act_cchange.cpp gcs_code_msg.cpp gcs_group.cpp gcs_core.cpp gcs_fc.cpp gcs.cpp gcs_gcomm.cpp gcs_error.cpp ''') #libgcs_env.VariantDir('.gcs', '.', duplicate=0) 
libgcs_env.StaticLibrary('gcs', libgcs_sources) # TODO: How to tell scons portably that C++ linker should be used # and program should be linked statically gcs_test_env = libgcs_env.Clone() gcs_test_env.Prepend(LIBS = File('#/galerautils/src/libgalerautils.a')) gcs_test_env.Prepend(LIBS = File('#/galerautils/src/libgalerautils++.a')) gcs_test_env.Prepend(LIBS = File('#/gcomm/src/libgcomm.a')) gcs_test_env.Prepend(LIBS = File('#/gcache/src/libgcache.a')) gcs_test_env.Prepend(LIBS = File('#/gcs/src/libgcs.a')) gcs_test_env.Program(target = 'gcs_test', source = 'gcs_test.cpp', LINK = libgcs_env['CXX']) SConscript('unit_tests/SConscript') # env.Append(LIBGALERA_OBJS = libgcs_env.SharedObject(libgcs_sources)) gcs4garb_env.Append(CPPFLAGS = ' -DGCS_FOR_GARB') garb_obj_dir = '.garb' gcs4garb_env.VariantDir(garb_obj_dir, '.', duplicate = 0) #garb_objects = [os.path.splitext(src)[0] + '_garb' + # env['OBJSUFFIX'] for src in libgcs_sources] garb_sources = [ garb_obj_dir + '/' + src for src in libgcs_sources ] gcs4garb_env.StaticLibrary('gcs4garb', garb_sources) Clean('.', garb_obj_dir) galera-4-26.4.25/gcs/src/gcs_dummy.hpp000644 000164 177776 00000002254 15107057155 020562 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2014 Codership Oy * * $Id$ */ /* * Dummy backend specification */ #ifndef _gcs_dummy_h_ #define _gcs_dummy_h_ #include "gcs_backend.hpp" #include "gcs_comp_msg.hpp" extern GCS_BACKEND_REGISTER_FN (gcs_dummy_register); extern GCS_BACKEND_CREATE_FN (gcs_dummy_create); #ifdef GCS_DUMMY_TESTING /* * What follows is an API for unit testing */ /*! Injects a message in the message queue to produce a desired msg sequence. */ extern long gcs_dummy_inject_msg (gcs_backend_t* backend, const void* msg, size_t len, gcs_msg_type_t type, long sender_idx); /*! Sets the new component view. 
* The same component message should be injected in the queue separately * (see gcs_dummy_inject_msg()) in order to model different race conditions */ extern long gcs_dummy_set_component (gcs_backend_t* backend, const gcs_comp_msg_t* comp); /*! Is needed to set transitional state */ extern long gcs_dummy_set_transitional (gcs_backend_t* backend); #endif /* GCS_DUMMY_TESTING */ #endif /* _gcs_dummy_h_ */ galera-4-26.4.25/gcs/src/gcs_error.cpp000644 000164 177776 00000001606 15107057155 020553 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2024 Codership Oy */ #include "gcs_error.hpp" #include #include const char* gcs_error_str(int err) { switch (err) { case EINTR: return "Operation interrupted"; case EAGAIN: return "Operation failed temporarily"; case EPERM: case ENOTCONN: return "Not in primary component"; case ECONNABORTED: return "Connection was closed"; case EBADF: return "Connection not initialized"; case ETIMEDOUT: return "Operation timed out"; default: return strerror(err); } } const char* gcs_state_transfer_error_str(int err) { switch (err) { case EAGAIN: return "No donor candidates temporarily available in suitable state"; case EHOSTUNREACH: return "Requested donor is not available"; case EHOSTDOWN: return "Joiner and donor can't be the same node"; default: return gcs_error_str(err); } } galera-4-26.4.25/gcs/src/gcs_spread.hpp000644 000164 177776 00000000417 15107057155 020704 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* * Definition of Spread GC backend */ #ifndef _gcs_spread_h_ #define _gcs_spread_h_ #include "gcs_backend.h" extern GCS_BACKEND_CREATE_FN (gcs_spread_create); #endif /* _gcs_spread_h_ */ galera-4-26.4.25/gcs/src/gcs_state_msg.hpp000644 000164 177776 00000015712 15107057155 021420 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ /* * Interface to state messages * */ #ifndef _gcs_state_msg_h_ #define _gcs_state_msg_h_ #include "gcs.hpp" #include 
"gcs_seqno.hpp" #include "gcs_act_proto.hpp" #include #include /* State flags */ #define GCS_STATE_FREP 0x01 // group representative #define GCS_STATE_FCLA 0x02 // count last applied (for JOINED node) #define GCS_STATE_FBOOTSTRAP 0x04 // part of prim bootstrap process #define GCS_STATE_FSTATELESS 0x08 // arbitrator or otherwise stateless node #ifdef GCS_STATE_MSG_ACCESS typedef struct gcs_state_msg { gu_uuid_t state_uuid; // UUID of the current state exchange gu_uuid_t group_uuid; // UUID of the group gu_uuid_t prim_uuid; // last PC state UUID gcs_seqno_t prim_seqno; // last PC state seqno gcs_seqno_t received; // last action seqno (received up to) gcs_seqno_t cached; // earliest action cached gcs_seqno_t last_applied; // last applied action reported by node gcs_seqno_t vote_seqno; // last seqno node voted on int64_t vote_res; // the vote reported by node const char* name; // human assigned node name const char* inc_addr; // incoming address string int version; // version of state message int gcs_proto_ver; int repl_proto_ver; int appl_proto_ver; int prim_gcs_ver; int prim_repl_ver; int prim_appl_ver; int prim_joined; // number of joined nodes in its last PC int desync_count; uint8_t vote_policy; // voting policy the node is using gcs_node_state_t prim_state; // state of the node in its last PC gcs_node_state_t current_state; // current state of the node uint8_t flags; } gcs_state_msg_t; #else typedef struct gcs_state_msg gcs_state_msg_t; #endif /*! Quorum decisions */ typedef struct gcs_state_quorum { gu_uuid_t group_uuid; //! group UUID gcs_seqno_t act_id; //! next global seqno gcs_seqno_t conf_id; //! configuration id gcs_seqno_t last_applied; //! group-wide commit cut bool primary; //! primary configuration or not int version; //! 
state excahnge version (max understood by all) int gcs_proto_ver; int repl_proto_ver; int appl_proto_ver; uint8_t vote_policy; } gcs_state_quorum_t; #define GCS_VOTE_ZERO_WINS 1 #define GCS_QUORUM_NON_PRIMARY (gcs_state_quorum_t){ \ GU_UUID_NIL, \ GCS_SEQNO_ILL, \ GCS_SEQNO_ILL, \ GCS_SEQNO_ILL, \ false, \ -1, -1, -1, -1, GCS_VOTE_ZERO_WINS \ } extern gcs_state_msg_t* gcs_state_msg_create (const gu_uuid_t* state_uuid, const gu_uuid_t* group_uuid, const gu_uuid_t* prim_uuid, gcs_seqno_t prim_seqno, gcs_seqno_t received, gcs_seqno_t cached, gcs_seqno_t last_applied, gcs_seqno_t vote_seqno, int64_t vote_res, uint8_t vote_policy, int prim_joined, gcs_node_state_t prim_state, gcs_node_state_t current_state, const char* name, const char* inc_addr, int gcs_proto_ver, int repl_proto_ver, int appl_proto_ver, int prim_gcs_ver, int prim_repl_ver, int prim_appl_ver, int desync_count, uint8_t flags); extern void gcs_state_msg_destroy (gcs_state_msg_t* state); /* Returns length needed to serialize gcs_state_msg_t for sending */ extern size_t gcs_state_msg_len (gcs_state_msg_t* state); /* Serialize gcs_state_msg_t into message */ extern ssize_t gcs_state_msg_write (void* msg, const gcs_state_msg_t* state); /* De-serialize gcs_state_msg_t from message */ extern gcs_state_msg_t* gcs_state_msg_read (const void* msg, ssize_t msg_len); /* Get state uuid */ extern const gu_uuid_t* gcs_state_msg_uuid (const gcs_state_msg_t* state); /* Get group uuid */ extern const gu_uuid_t* gcs_state_msg_group_uuid (const gcs_state_msg_t* state); /* Get last PC uuid */ //extern const gu_uuid_t* //gcs_state_prim_uuid (const gcs_state_msg_t* state); /* Get last received action seqno */ extern gcs_seqno_t gcs_state_msg_received (const gcs_state_msg_t* state); /* Get lowest cached action seqno */ extern gcs_seqno_t gcs_state_msg_cached (const gcs_state_msg_t* state); /* Get current node state */ extern gcs_node_state_t gcs_state_msg_current_state (const gcs_state_msg_t* state); /* Get last prim node state 
*/ extern gcs_node_state_t gcs_state_msg_prim_state (const gcs_state_msg_t* state); /* Get node name */ extern const char* gcs_state_msg_name (const gcs_state_msg_t* state); /* Get node incoming address */ extern const char* gcs_state_msg_inc_addr (const gcs_state_msg_t* state); /* Get last applied action seqno */ gcs_seqno_t gcs_state_msg_last_applied (const gcs_state_msg_t* state); /* Get last vote */ void gcs_state_msg_last_vote (const gcs_state_msg_t* state, gcs_seqno_t& seqno, int64_t& res); /* Get vote policy */ uint8_t gcs_state_msg_vote_policy (const gcs_state_msg_t* state); /* Get supported protocols */ extern void gcs_state_msg_get_proto_ver (const gcs_state_msg_t* state, int* gcs_proto_ver, int* repl_proto_ver, int* appl_proto_ver); /* Get desync count */ extern int gcs_state_msg_get_desync_count(const gcs_state_msg_t* state); /* Get state message flags */ extern uint8_t gcs_state_msg_flags (const gcs_state_msg_t* state); /*! Get quorum decision from state messages * * @param[in] states array of state message pointers * @param[in] states_num length of array * @param[out] quorum quorum calculations result * @retval 0 if there were no errors during processing. 
Quorum results are in * quorum parameter */ extern long gcs_state_msg_get_quorum (const gcs_state_msg_t* states[], size_t states_num, gcs_state_quorum_t* quorum); /* Print state message contents to buffer */ extern int gcs_state_msg_snprintf (char* str, size_t size, const gcs_state_msg_t* msg); #endif /* _gcs_state_msg_h_ */ galera-4-26.4.25/gcs/src/gcs_code_msg.cpp000644 000164 177776 00000000316 15107057155 021177 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015 Codership Oy */ #include "gcs_code_msg.hpp" void gcs::core::CodeMsg::print(std::ostream& os) const { os << gu::GTID(uuid(), seqno()) << ',' << code(); } galera-4-26.4.25/gcs/src/gcs_test.hpp000644 000164 177776 00000010474 15107057155 020411 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ #ifndef _gcs_test_h_ #define _gcs_test_h_ // some data to test bugger packets static char gcs_test_data[] = "001 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "002 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "003 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "004 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "005 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "006 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "007 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "008 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "009 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "010 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "011 
456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "012 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "013 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "014 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "015 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "016 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "017 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "018 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "019 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "020 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "021 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "022 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "023 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "024 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "025 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "026 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "027 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "028 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "029 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "030 
456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "031 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "032 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "033 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "034 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "035 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "036 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "037 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "038 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "039 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "040 456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" "041 4567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234" ; #endif galera-4-26.4.25/gcs/src/gcs_conf.cpp000644 000164 177776 00000001135 15107057155 020344 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /* Logging options */ #include #include "gcs.hpp" long gcs_conf_set_log_file (FILE *file) { return gu_conf_set_log_file (file); } long gcs_conf_set_log_callback (void (*logger) (int, const char*)) { return gu_conf_set_log_callback (logger); } long gcs_conf_self_tstamp_on () { return gu_conf_self_tstamp_on (); } long gcs_conf_self_tstamp_off () { return gu_conf_self_tstamp_off (); } long gcs_conf_debug_on () { return gu_conf_debug_on (); } long gcs_conf_debug_off () { return gu_conf_debug_off (); } galera-4-26.4.25/gcs/src/gcs_msg_type.cpp000644 000164 177776 00000000471 15107057155 
021250 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #include "gcs_msg_type.hpp" const char* gcs_msg_type_string[GCS_MSG_MAX] = { "ERROR", "ACTION", "LAST", "COMPONENT", "STATE_UUID", "STATE_MSG", "JOIN", "SYNC", "FLOW", "VOTE", "CAUSAL" }; galera-4-26.4.25/gcs/src/gcs_fifo_lite.hpp000644 000164 177776 00000011310 15107057155 021360 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2011 Codership Oy * * $Id$ * * FIFO "class" customized for particular purpose * (here I decided to sacrifice generality for efficiency). * Implements fixed size "mallocless" FIFO (read "ring buffer"). * Except gcs_fifo_create() there are two types of fifo * access methods - protected and unprotected. Unprotected * methods assume that calling routines implement their own * protection, and thus are simplified for speed. */ #ifndef _GCS_FIFO_LITE_H_ #define _GCS_FIFO_LITE_H_ #include #include #include #include #include #include #include "gcs.hpp" typedef struct gcs_fifo_lite { long length; ulong item_size; ulong mask; ulong head; ulong tail; long used; bool closed; bool destroyed; long put_wait; long get_wait; gu_cond_t put_cond; gu_cond_t get_cond; gu_mutex_t lock; void* queue; } gcs_fifo_lite_t; /* Creates FIFO object. Since it practically consists of array of (void*), * the length can be chosen arbitrarily high - to minimize the risk * of overflow situation. 
*/ gcs_fifo_lite_t* gcs_fifo_lite_create (size_t length, size_t item_size); void gcs_fifo_lite_close (gcs_fifo_lite_t* fifo); void gcs_fifo_lite_open (gcs_fifo_lite_t* fifo); long gcs_fifo_lite_destroy (gcs_fifo_lite_t* fifo); static inline void* _gcs_fifo_lite_tail (gcs_fifo_lite_t* f) { return ((char*)f->queue + f->tail * f->item_size); } static inline void* _gcs_fifo_lite_head (gcs_fifo_lite_t* f) { return ((char*)f->queue + f->head * f->item_size); } #define GCS_FIFO_LITE_LOCK \ if (gu_unlikely (gu_mutex_lock (&fifo->lock))) { \ gu_fatal ("Mutex lock failed."); \ abort(); \ } /*! If FIFO is not full, returns pointer to the tail item and locks FIFO, * otherwise blocks. Or returns NULL if FIFO is closed. */ static inline void* gcs_fifo_lite_get_tail (gcs_fifo_lite_t* fifo) { void* ret = NULL; GCS_FIFO_LITE_LOCK; while (!fifo->closed && fifo->used >= fifo->length) { fifo->put_wait++; gu_cond_wait (&fifo->put_cond, &fifo->lock); } if (gu_likely(!fifo->closed)) { assert (fifo->used < fifo->length); ret = _gcs_fifo_lite_tail (fifo); } else { gu_mutex_unlock (&fifo->lock); } return ret; } /*! Advances FIFO tail and unlocks FIFO */ static inline void gcs_fifo_lite_push_tail (gcs_fifo_lite_t* fifo) { fifo->tail = (fifo->tail + 1) & fifo->mask; fifo->used++; assert (fifo->used <= fifo->length); if (fifo->get_wait > 0) { fifo->get_wait--; gu_cond_signal (&fifo->get_cond); } gu_mutex_unlock (&fifo->lock); } /*! If FIFO is not empty, returns pointer to the head item and locks FIFO, * or returns NULL if FIFO is empty. Blocking behaviour disabled since * it is not needed in GCS: recv_thread should never block. 
*/ static inline void* gcs_fifo_lite_get_head (gcs_fifo_lite_t* fifo) { void* ret = NULL; GCS_FIFO_LITE_LOCK; /* Uncomment this for blocking behaviour while (!fifo->closed && 0 == fifo->used) { fifo->get_wait++; gu_cond_wait (&fifo->get_cond, &fifo->lock); } */ if (gu_likely(fifo->used > 0)) { ret = _gcs_fifo_lite_head (fifo); } else { gu_mutex_unlock (&fifo->lock); } return ret; } /*! Advances FIFO head and unlocks FIFO */ static inline void gcs_fifo_lite_pop_head (gcs_fifo_lite_t* fifo) { fifo->head = (fifo->head + 1) & fifo->mask; fifo->used--; assert (fifo->used != -1); if (fifo->put_wait > 0) { fifo->put_wait--; gu_cond_signal (&fifo->put_cond); } gu_mutex_unlock (&fifo->lock); } /*! Unlocks FIFO */ static inline long gcs_fifo_lite_release (gcs_fifo_lite_t* fifo) { return (gu_mutex_unlock (&fifo->lock)); } /*! Removes item from tail, returns true if success */ static inline bool gcs_fifo_lite_remove (gcs_fifo_lite_t* const fifo) { bool ret = false; assert (fifo); GCS_FIFO_LITE_LOCK; if (fifo->used) { fifo->tail = (fifo->tail - 1) & fifo->mask; fifo->used--; ret = true; if (fifo->put_wait > 0) { fifo->put_wait--; gu_cond_signal (&fifo->put_cond); } } gu_mutex_unlock (&fifo->lock); return ret; } static inline bool gcs_fifo_lite_not_full (const gcs_fifo_lite_t* const fifo) { return (fifo->used < fifo->length); } #endif /* _GCS_FIFO_LITE_H_ */ galera-4-26.4.25/gcs/src/gcs_defrag.hpp000644 000164 177776 00000003655 15107057155 020665 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008 Codership Oy * * $Id$ */ /*! * Receiving action context */ #ifndef _gcs_defrag_h_ #define _gcs_defrag_h_ #include "gcs.hpp" // for gcs_seqno_t et al. 
#include "gcs_act_proto.hpp" #include "gcs_act.hpp" #include "gcs_gcache.hpp" #include // for memset() #include typedef struct gcs_defrag { gcache_t* cache; gcs_seqno_t sent_id; // sent id (unique for a node) uint8_t* head; // head of action buffer uint8_t* tail; // tail of action data size_t size; size_t received; ulong frag_no; // number of fragment received bool reset; } gcs_defrag_t; static inline void gcs_defrag_init (gcs_defrag_t* df, gcache_t* cache) { memset (df, 0, sizeof (*df)); df->cache = cache; df->sent_id = GCS_SEQNO_ILL; } /*! * Handle received action fragment * * @return 0 - success, * size of action - success, full action received, * negative - error. */ extern ssize_t gcs_defrag_handle_frag (gcs_defrag_t* df, const gcs_act_frag_t* frg, struct gcs_act* act, bool local); /*! Deassociate, but don't deallocate action resources */ static inline void gcs_defrag_forget (gcs_defrag_t* df) { gcs_defrag_init (df, df->cache); } /*! Free resources associated with defrag (for lost node cleanup) */ static inline void gcs_defrag_free (gcs_defrag_t* df) { #ifndef GCS_FOR_GARB if (df->head) { gcs_gcache_free (df->cache, df->head); // df->head, df->tail will be zeroed in gcs_defrag_init() below } #else assert(NULL == df->head); #endif gcs_defrag_init (df, df->cache); } /*! 
Mark current action as reset */ static inline void gcs_defrag_reset (gcs_defrag_t* df) { df->reset = true; } #endif /* _gcs_defrag_h_ */ galera-4-26.4.25/gcs/src/unit_tests/000755 000164 177776 00000000000 15107057160 020254 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcs/src/unit_tests/gcs_test_utils.cpp000644 000164 177776 00000043553 15107057155 024031 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015-2021 Codership Oy */ #include "gcs_test_utils.hpp" #include "gu_inttypes.hpp" namespace gcs_test { void InitConfig::common_ctor(gu::Config& cfg) { gcache::GCache::register_params(cfg); gcs_register_params(cfg); } InitConfig::InitConfig(gu::Config& cfg) { common_ctor(cfg); } InitConfig::InitConfig(gu::Config& cfg, const std::string& base_name) { common_ctor(cfg); std::string p("gcache.size=1M;gcache.name="); p += base_name; gu_trace(cfg.parse(p)); } GcsGroup::GcsGroup() : conf_ (), init_ (conf_, "group"), gcache_ (NULL), group_ (NULL), initialized_(false) {} void GcsGroup::common_ctor(const char* node_name, const char* inc_addr, gcs_proto_t gver, int rver, int aver) { assert(NULL == gcache_); assert(false == initialized_); conf_.set("gcache.name", std::string(node_name) + ".cache"); gcache_ = new gcache::GCache(NULL, conf_, "."); group_ = new gcs_group(conf_, reinterpret_cast(gcache_), node_name, inc_addr, gver, rver, aver); initialized_ = true; } void GcsGroup::common_dtor() { if (initialized_) { assert(NULL != gcache_); assert(NULL != group_); delete group_; delete gcache_; std::string const gcache_name(conf_.get("gcache.name")); ::unlink(gcache_name.c_str()); } else { assert(NULL == gcache_); assert(NULL == group_); } } void GcsGroup::init(const char* node_name, const char* inc_addr, gcs_proto_t gcs_proto_ver, int repl_proto_ver, int appl_proto_ver) { common_dtor(); initialized_ = false; gcache_ = NULL; common_ctor(node_name, inc_addr,gcs_proto_ver,repl_proto_ver,appl_proto_ver); } GcsGroup::~GcsGroup() { common_dtor(); } } // namespace 
#include "../gcs_comp_msg.hpp" #include gcs_seqno_t gt_node::deliver_last_applied(int const from, gcs_seqno_t const la) { gcs_seqno_t buf(gcs_seqno_htog(la)); gcs_recv_msg_t const msg(&buf, sizeof(buf), sizeof(buf), from, GCS_MSG_LAST); return gcs_group_handle_last_msg(group(), &msg); } gt_node::gt_node(const char* const name, int const gcs_proto_ver) : group(), id() { if (name) { snprintf(id, sizeof(id) - 1, "%s", name); } else { snprintf(id, sizeof(id) - 1, "%p", this); } id[sizeof(id) - 1] = '\0'; int const str_len = sizeof(id) + 6; char name_str[str_len] = { '\0', }; char addr_str[str_len] = { '\0', }; snprintf(name_str, str_len - 1, "name:%s", id); snprintf(addr_str, str_len - 1, "addr:%s", id); group.init(name_str, addr_str, gcs_proto_ver, 0, 0); } gt_node::~gt_node() { } /* delivers new component message to all memebers */ int gt_group::deliver_component_msg(bool const prim) { for (int i = 0; i < nodes_num; i++) { gcs_comp_msg_t* msg = gcs_comp_msg_new(prim, false, i, nodes_num, 0); if (msg) { for (int j = 0; j < nodes_num; j++) { const struct gt_node* const node(nodes[j]); long ret = gcs_comp_msg_add (msg, node->id, j); ck_assert_msg(j == ret, "Failed to add %d member: %ld (%s)", j, ret, strerror(-ret)); /* check that duplicate node ID is ignored */ ret = gcs_comp_msg_add (msg, node->id, j); ck_assert_msg(ret < 0, "Added duplicate %d member", j); } /* check component message */ ck_assert(i == gcs_comp_msg_self(msg)); ck_assert(nodes_num == gcs_comp_msg_num(msg)); for (int j = 0; j < nodes_num; j++) { const char* const src_id = nodes[j]->id; const char* const dst_id = gcs_comp_msg_member(msg, j)->id; ck_assert_msg(!strcmp(src_id, dst_id), "%d node id %s, recorded in comp msg as %s", j, src_id, dst_id); gcs_segment_t const dst_seg(gcs_comp_msg_member(msg,j)->segment); ck_assert_msg(j == dst_seg, "%d node segment %d, recorded in comp msg as %d", j, j, (int)dst_seg); } gcs_group_state_t ret = gcs_group_handle_comp_msg(nodes[i]->group(), msg); ck_assert(ret == 
GCS_GROUP_WAIT_STATE_UUID); gcs_comp_msg_delete (msg); /* check that uuids are properly recorded in internal structures */ for (int j = 0; j < nodes_num; j++) { const char* src_id = nodes[j]->id; const char* dst_id = nodes[i]->group()->nodes[j].id; ck_assert_msg(!strcmp(src_id, dst_id), "%d node id %s, recorded at node %d as %s", j, src_id, i, dst_id); } } else { return -ENOMEM; } } return 0; } int gt_group::perform_state_exchange() { /* first deliver state uuid message */ gu_uuid_t state_uuid; gu_uuid_generate (&state_uuid, NULL, 0); gcs_recv_msg_t uuid_msg(&state_uuid, sizeof (state_uuid), sizeof (state_uuid), 0, GCS_MSG_STATE_UUID); gcs_group_state_t state; int i; for (i = 0; i < nodes_num; i++) { state = gcs_group_handle_uuid_msg (nodes[i]->group(),&uuid_msg); ck_assert_msg(state == GCS_GROUP_WAIT_STATE_MSG, "Wrong group state after STATE_UUID message. " "Expected: %d, got: %d", GCS_GROUP_WAIT_STATE_MSG, state); } /* complete state message exchange */ for (i = 0; i < nodes_num; i++) { /* create state message from node i */ gcs_state_msg_t* state = gcs_group_get_state (nodes[i]->group()); ck_assert(NULL != state); ssize_t state_len = gcs_state_msg_len (state); uint8_t state_buf[state_len]; gcs_state_msg_write (state_buf, state); gcs_recv_msg_t state_msg(state_buf, sizeof (state_buf), sizeof (state_buf), i, GCS_MSG_STATE_MSG); /* deliver to each of the nodes */ int j; for (j = 0; j < nodes_num; j++) { gcs_group_state_t ret = gcs_group_handle_state_msg (nodes[j]->group(), &state_msg); if (nodes_num - 1 == i) { // a message from the last node ck_assert_msg(ret == GCS_GROUP_PRIMARY, "Handling state msg failed: sender %d, " "receiver %d", i, j); } else { ck_assert_msg(ret == GCS_GROUP_WAIT_STATE_MSG, "Handling state msg failed: sender %d, " "receiver %d", i, j); } } gcs_state_msg_destroy (state); } return 0; } int gt_group::add_node(struct gt_node* node, bool const new_id) { if (GT_MAX_NODES == nodes_num) return -ERANGE; if (new_id) { gu_uuid_t node_uuid; 
gu_uuid_generate (&node_uuid, NULL, 0); gu_uuid_print (&node_uuid, (char*)node->id, sizeof (node->id)); gu_debug ("Node %d (%p) UUID: %s", nodes_num, node, node->id); } nodes[nodes_num] = node; nodes_num++; /* check that all node ids are different */ int i; for (i = 0; i < nodes_num; i++) { int j; for (j = i+1; j < nodes_num; j++) { ck_assert_msg(strcmp(nodes[i]->id, nodes[j]->id), "%d (%p) and %d (%p) have the same id: %s/%s", i, nodes[i], j, nodes[j], nodes[i]->id, nodes[j]->id); } } /* deliver new component message to all nodes */ int ret = deliver_component_msg(primary); ck_assert_msg(ret == 0, "Component message delivery failed: %d (%s)", ret, strerror(-ret)); /* deliver state exchange uuid */ ret = perform_state_exchange(); ck_assert_msg(ret == 0, "State exchange failed: %d (%s)", ret, strerror(-ret)); return 0; } /* NOTE: this function uses simplified and determinitstic algorithm where * dropped node is always replaced by the last one in group. * For our purposes (reproduction of #465) it fits perfectly. 
*/ struct gt_node* gt_group::drop_node(int const idx) { ck_assert(idx >= 0); ck_assert(idx < nodes_num); struct gt_node* dropped = nodes[idx]; nodes[idx] = nodes[nodes_num - 1]; nodes[nodes_num - 1] = NULL; nodes_num--; if (nodes_num > 0) { deliver_component_msg(primary); perform_state_exchange(); } return dropped; } /* for delivery of GCS_MSG_SYNC or GCS_MSG_JOIN msg*/ int gt_group::deliver_join_sync_msg(int const src, gcs_msg_type_t const type) { gcs_seqno_t seqno = nodes[src]->group()->act_id_; gcs_recv_msg_t msg(&seqno, sizeof (seqno), sizeof (seqno), src, type); int ret = -1; int i; for (i = 0; i < nodes_num; i++) { gcs_group_t* const group = nodes[i]->group(); switch (type) { case GCS_MSG_JOIN: ret = gcs_group_handle_join_msg(group, &msg); mark_point(); if (i == src) { ck_assert_msg(ret == 1, "%d failed to handle own JOIN message: %d (%s)", i, ret, strerror (-ret)); } else { ck_assert_msg(ret == 0, "%d failed to handle other JOIN message: %d (%s)", i, ret, strerror (-ret)); } break; case GCS_MSG_SYNC: ret = gcs_group_handle_sync_msg(group, &msg); if (i == src) { ck_assert_msg(ret == 1 || group->nodes[src].status != GCS_NODE_STATE_JOINED, "%d failed to handle own SYNC message: %d (%s)", i, ret, strerror (-ret)); } else { ck_assert_msg(ret == 0, "%d failed to handle other SYNC message: %d (%s)", i, ret, strerror (-ret)); } break; default: ck_abort_msg("wrong message type: %d", type); } } return ret; } gcs_seqno_t gt_group::deliver_last_applied(int const from, gcs_seqno_t const la) { gcs_seqno_t const ret(nodes_num > 0 ? 
nodes[0]->deliver_last_applied(from, la) : GCS_SEQNO_ILL); for (int i(1); i < nodes_num; ++i) { gcs_seqno_t const res(nodes[i]->deliver_last_applied(from, la)); ck_assert_msg((ret == res || proto_ver <= 2), "nodes[%d]->deliver_last_applied(%d, %" PRId64 "): %" PRId64 ", expected %" PRId64 ", proto_ver %d", i, from, la, res, ret, proto_ver); } return ret; } bool gt_group::verify_node_state_across(int const idx, gcs_node_state_t const check) const { for (int i(0); i < nodes_num; i++) { gcs_node_state_t const state(nodes[i]->group()->nodes[idx].status); if (check != state) { gu_error("At node %d node's %d status is not %d, but %d", i, idx, check, state); return false; } } return true; } /* start SST on behalf of node idx (joiner) * @return donor idx or negative error code */ int gt_group::sst_start (int const joiner_idx,const char* donor_name) { ck_assert(joiner_idx >= 0); ck_assert(joiner_idx < nodes_num); ssize_t const req_len = strlen(donor_name) + 2; // leave one byte as sst request payload int donor_idx = -1; int i; for (i = 0; i < nodes_num; i++) { gcache::GCache* const gcache(nodes[i]->group.gcache()); ck_assert(NULL != gcache); // sst request is expected to be dynamically allocated char* const req_buf = (char*)gcache->malloc(req_len); ck_assert(NULL != req_buf); ::memset(req_buf, 0, req_len); sprintf (req_buf, "%s", donor_name); struct gcs_act_rcvd req(gcs_act(req_buf, req_len, GCS_ACT_STATE_REQ), NULL, GCS_SEQNO_ILL, joiner_idx); int ret = gcs_group_handle_state_request(nodes[i]->group(), &req); if (ret < 0) { // don't fail here, we may want to test negatives gu_error ("Handling state request to '%s' failed: %d (%s)", donor_name, ret, strerror (-ret)); return ret; } if (i == joiner_idx) { ck_assert(ret == req_len); gcache->free(req_buf); // passed to joiner } else { if (ret > 0) { if (donor_idx < 0) { ck_assert(req.id == i); donor_idx = i; gcache->free(req_buf); // passed to donor } else { ck_abort_msg("More than one donor selected: %d, first " "donor: %d", 
i, donor_idx); } } } } ck_assert_msg(donor_idx >= 0, "Failed to select donor"); for (i = 0; i < nodes_num; i++) { gcs_group_t* const group = nodes[i]->group(); gcs_node_t* const donor = &group->nodes[donor_idx]; gcs_node_state_t state = donor->status; ck_assert_msg(state == GCS_NODE_STATE_DONOR, "%d is not donor at %d", donor_idx, i); int dc = donor->desync_count; ck_assert_msg(dc >= 1, "donor %d at %d has desync_count %d", donor_idx, i, dc); gcs_node_t* const joiner = &group->nodes[joiner_idx]; state = joiner->status; ck_assert_msg(state == GCS_NODE_STATE_JOINER, "%d is not joiner at %d", joiner_idx, i); dc = joiner->desync_count; ck_assert_msg(dc == 0, "joiner %d at %d has desync_count %d", donor_idx, i, dc); /* check that donor and joiner point at each other */ ck_assert_msg(!memcmp(group->nodes[donor_idx].joiner, group->nodes[joiner_idx].id, GCS_COMP_MEMB_ID_MAX_LEN+1), "Donor points at wrong joiner: expected %s, got %s", group->nodes[joiner_idx].id,group->nodes[donor_idx].joiner); ck_assert_msg(!memcmp(group->nodes[joiner_idx].donor, group->nodes[donor_idx].id, GCS_COMP_MEMB_ID_MAX_LEN+1), "Joiner points at wrong donor: expected %s, got %s", group->nodes[donor_idx].id,group->nodes[joiner_idx].donor); } return donor_idx; } /* Finish SST on behalf of a node idx (joiner or donor) */ int gt_group::sst_finish(int const idx) { gcs_node_state_t node_state; deliver_join_sync_msg(idx, GCS_MSG_JOIN); node_state = nodes[idx]->state(); ck_assert(node_state == GCS_NODE_STATE_JOINED); deliver_join_sync_msg(idx, GCS_MSG_SYNC); node_state = nodes[idx]->state(); ck_assert(node_state == GCS_NODE_STATE_SYNCED); return 0; } int gt_group::sync_node(int const joiner_idx) { gcs_node_state_t const node_state(nodes[joiner_idx]->state()); ck_assert(node_state == GCS_NODE_STATE_PRIM); // initiate SST int const donor_idx(sst_start(joiner_idx, "")); ck_assert(donor_idx >= 0); ck_assert(donor_idx != joiner_idx); // complete SST int err; err = sst_finish(donor_idx); ck_assert(0 == err); err 
= sst_finish(joiner_idx); ck_assert(0 == err); return 0; } gt_group::gt_group(int const num, int const gcs_proto_ver, bool const prim) : nodes(), nodes_num(0), proto_ver(gcs_proto_ver), primary(prim) { if (num > 0) { for (int i = 0; i < num; ++i) { char name[32]; sprintf(name, "%d", i); add_node(new gt_node(name, gcs_proto_ver), true); bool const prim_state(nodes[0]->group.state() == GCS_GROUP_PRIMARY); ck_assert(prim_state == prim); gcs_node_state_t node_state(nodes[i]->state()); if (primary) { if (0 == i) { ck_assert(node_state == GCS_NODE_STATE_JOINED); deliver_join_sync_msg(0, GCS_MSG_SYNC); node_state = nodes[0]->state(); ck_assert(node_state == GCS_NODE_STATE_SYNCED); } else { ck_assert(node_state == GCS_NODE_STATE_PRIM); // initiate SST int const donor_idx(sst_start(i, "")); ck_assert(donor_idx >= 0); ck_assert(donor_idx != i); // complete SST int err; err = sst_finish(donor_idx); ck_assert(0 == err); err = sst_finish(i); ck_assert(0 == err); } } else { ck_assert(node_state == GCS_NODE_STATE_NON_PRIM); } } } ck_assert(num == nodes_num); } gt_group::~gt_group() { while (nodes_num) { struct gt_node* const node(drop_node(0)); ck_assert(node != NULL); delete node; } } galera-4-26.4.25/gcs/src/unit_tests/gcs_group_test.cpp000644 000164 177776 00000101062 15107057155 024013 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ #include "../gcs_group.hpp" #include "../gcs_act_proto.hpp" #include "../gcs_comp_msg.hpp" #include #include "gcs_group_test.hpp" #include "gcs_test_utils.hpp" #include "gu_inttypes.hpp" #define TRUE (0 == 0) #define FALSE (!TRUE) /* * header will be written to buf from frg, act_len of payload will be copied * from act, msg structure will be filled in */ static void msg_write (gcs_recv_msg_t* msg, gcs_act_frag_t* frg, char* buf, size_t buf_len, const char* data, size_t data_len, int sender_idx, gcs_msg_type_t type) { long ret; ret = gcs_act_proto_write (frg, buf, buf_len); ck_assert_msg(0 == ret, "error 
code: %ld", ret); ck_assert(frg->frag != NULL); ck_assert_msg(frg->frag_len >= data_len, "Resulting frag_len %zu is less than required act_len %zu\n" "Refactor the test and increase buf_len.", frg->frag_len, data_len); memcpy ((void*)frg->frag, data, data_len); msg->buf = buf; msg->buf_len = buf_len; msg->size = (buf_len - frg->frag_len + data_len); msg->sender_idx = sender_idx; msg->type = type; } static gcs_group_state_t new_component (gcs_group_t* group, const gcs_comp_msg_t* comp) { gcs_group_state_t const ret(gcs_group_handle_comp_msg (group, comp)); // modelling real state exchange is really tedious here, just fake it group->state = GCS_GROUP_PRIMARY; return ret; } #define LOCALHOST "localhost" #define REMOTEHOST "remotehost" #define DISTANTHOST "distanthost" // This tests tests configuration changes START_TEST (gcs_group_configuration) { ssize_t ret; gcs_seqno_t seqno = 11; // The Action const char act_buf[] = "Test action smuction"; ssize_t act_len = sizeof (act_buf); // lengths of three fragments of the action long frag1_len = act_len / 3; long frag2_len = frag1_len; long frag3_len = act_len - frag1_len - frag2_len; // pointer to the three fragments of the action const char* frag1 = act_buf; const char* frag2 = frag1 + frag1_len; const char* frag3 = frag2 + frag2_len; // message buffers const long buf_len = 64; char buf1[buf_len], buf2[buf_len], buf3[buf_len], buf4[buf_len], buf5[buf_len]; // recv message structures gcs_recv_msg_t msg1, msg2, msg3, msg4, msg5; gcs_act_frag_t frg1, frg2, frg3, frg4, frg5, frg; struct gcs_act_rcvd r_act; struct gcs_act* act = &r_act.act; gcs_comp_msg_t* comp; mark_point(); #ifndef NDEBUG // debug build breaks the test due to asserts return; #endif // Initialize message parameters frg1.act_id = getpid(); frg1.act_size = act_len; frg1.frag = NULL; frg1.frag_len = 0; frg1.frag_no = 0; frg1.act_type = GCS_ACT_WRITESET; frg1.proto_ver = 0; // normal fragments frg2 = frg3 = frg1; frg2.frag_no = frg1.frag_no + 1; frg3.frag_no = 
frg2.frag_no + 1; // bad fragmets to be tried instead of frg2 frg4 = frg5 = frg2; frg4.act_id = frg2.act_id + 1; // wrong action id frg5.act_type = GCS_ACT_SERVICE; // wrong action type mark_point(); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 0,GCS_MSG_ACTION); msg_write (&msg2, &frg2, buf2, buf_len, frag2, frag2_len, 0,GCS_MSG_ACTION); msg_write (&msg3, &frg3, buf3, buf_len, frag3, frag3_len, 0,GCS_MSG_ACTION); msg_write (&msg4, &frg4, buf4, buf_len, "4444", 4, 0, GCS_MSG_ACTION); msg_write (&msg5, &frg5, buf5, buf_len, "55555", 5, 0, GCS_MSG_ACTION); mark_point(); // ready gu::Config cnf; gcs_group::register_params(cnf); gcs_group_t group(cnf, NULL, "my node", "my addr", 0, 0, 0); ck_assert(!gcs_group_is_primary(&group)); ck_assert(group.num == 0); // Prepare first primary component message containing only one node comp = gcs_comp_msg_new (TRUE, false, 0, 1, 0); ck_assert(comp != NULL); ck_assert(!gcs_comp_msg_add (comp, LOCALHOST, 0)); ret = new_component (&group, comp); ck_assert(ret >= 0); ck_assert(gcs_group_is_primary(&group)); ck_assert(0 == group.act_id_); group.act_id_ = seqno - 1; ck_assert(GCS_NODE_STATE_JOINED == group.nodes[0].status); #define TRY_MESSAGE(msg) \ ret = gcs_act_proto_read (&frg, (msg).buf, (msg).size); \ if (0 == ret) \ ret = gcs_group_handle_act_msg (&group, &frg, &(msg), &r_act, true); // 1. Try fragment that is not the first r_act = gcs_act_rcvd(); // ret = gcs_group_handle_act_msg (&group, &frg, &msg3, &r_act); TRY_MESSAGE(msg3); ck_assert_msg(ret == -EPROTO, "expected ret = %d, got %zd", -EPROTO, ret); ck_assert(act->buf == NULL); ck_assert(act->buf_len == 0); mark_point(); // 2. 
Try first fragment // ret = gcs_group_handle_act_msg (&group, &msg1, &r_act); TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf == NULL); ck_assert(act->buf_len == 0); #define TRY_WRONG_2ND_FRAGMENT(frag, res) \ /*ret = gcs_group_handle_act_msg (&group, &frag, &r_act);*/ \ TRY_MESSAGE(frag); \ ck_assert_msg(ret == res, \ "expected ret = %d, got %zd", res, ret); \ ck_assert(act->buf_len == 0); // 3. Try first fragment again gu_debug ("\n\nTRY_WRONG_2ND_FRAGMENT(msg1)"); TRY_WRONG_2ND_FRAGMENT(msg1, 0); // tolerate duplicate fragments // 4. Try third fragment gu_debug ("\n\nTRY_WRONG_2ND_FRAGMENT(msg3)"); TRY_WRONG_2ND_FRAGMENT(msg3, -EPROTO); // 5. Try fourth fragment gu_debug ("\n\nTRY_WRONG_2ND_FRAGMENT(msg4)"); TRY_WRONG_2ND_FRAGMENT(msg4, -EPROTO); // 6. Try fifth fragment gu_debug ("\n\nTRY_WRONG_2ND_FRAGMENT(msg5)"); TRY_WRONG_2ND_FRAGMENT(msg5, -EPROTO); // 7. Try correct second fragment TRY_MESSAGE(msg2); ck_assert(ret == 0); ck_assert(act->buf == NULL); ck_assert(act->buf_len == 0); // 8. Try third fragment, last one TRY_MESSAGE(msg3); ck_assert(ret == act_len); ck_assert(r_act.sender_idx == 0); ck_assert(act->buf != NULL); ck_assert(act->buf_len == act_len); ck_assert_msg(r_act.id == seqno, "Expected seqno %" PRId64 ", found %" PRId64, seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); r_act = gcs_act_rcvd(); // 10. New component message gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 1, 2, 0); ck_assert(comp != NULL); ck_assert(gcs_comp_msg_add (comp, REMOTEHOST, 1) >= 0); ck_assert(gcs_comp_msg_add (comp, LOCALHOST, 0) >= 0); ret = new_component (&group, comp); ck_assert(ret >= 0); ck_assert(gcs_group_is_primary(&group)); ck_assert(GCS_NODE_STATE_JOINED == group.nodes[1].status); ck_assert(GCS_NODE_STATE_JOINED > group.nodes[0].status); group.nodes[0].status = GCS_NODE_STATE_JOINED; // 11. 
Try the same with foreign action (now my index is 1 and sender is 0) TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); TRY_MESSAGE(msg2); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); gu_debug("\n\nTRY_MESSAGE(msg3)"); TRY_MESSAGE(msg3); ck_assert_msg(ret == act_len, "Expected ret = %zd, got %zd", act_len, ret); ck_assert(act->buf_len == act_len); ck_assert(act->buf != NULL); ck_assert_msg(!strncmp(static_cast(act->buf), act_buf, act_len), "Action received: '%s', expected '%s'", static_cast(act->buf), act_buf); ck_assert(r_act.sender_idx == 0); ck_assert(act->type == GCS_ACT_WRITESET); ck_assert_msg(r_act.id == seqno, "Expected seqno %" PRId64 ", found %" PRId64, seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); r_act = gcs_act_rcvd(); // 12. Try foreign action with a new node joined in the middle. TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 1, 3, 0); ck_assert(comp != NULL); ck_assert(gcs_comp_msg_add (comp, REMOTEHOST, 1) >= 0); ck_assert(gcs_comp_msg_add (comp, LOCALHOST, 0) >= 0); ck_assert(gcs_comp_msg_add (comp, DISTANTHOST,2) >= 0); ret = new_component (&group, comp); ck_assert(ret >= 0); ck_assert(gcs_group_is_primary(&group)); // now I must be able to resend the action from scratch TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); TRY_MESSAGE(msg2); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); TRY_MESSAGE(msg3); ck_assert(ret == act_len); ck_assert(act->buf_len == act_len); ck_assert(act->buf != NULL); ck_assert_msg(!strncmp(static_cast(act->buf), act_buf, act_len), "Action received: '%s', expected '%s'", static_cast(act->buf), act_buf); ck_assert(r_act.sender_idx == 0); ck_assert(act->type == GCS_ACT_WRITESET); ck_assert_msg(r_act.id == seqno, "Expected seqno 
%" PRId64 ", found %" PRId64, seqno, r_act.id); seqno++; // cleanup free ((void*)act->buf); r_act = gcs_act_rcvd(); // 13. Try to send an action with one node disappearing in the middle // and order of nodes changed // 13.1 Each node sends a message msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 0,GCS_MSG_ACTION); TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 1,GCS_MSG_ACTION); TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); msg_write (&msg1, &frg1, buf1, buf_len, frag1, frag1_len, 2,GCS_MSG_ACTION); TRY_MESSAGE(msg1); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); // 13.2 configuration changes, one node disappears // (REMOTEHOST, LOCALHOST, DISTANTHOST) -> (LOCALHOST, REMOTEHOST) gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (TRUE, false, 0, 2, 0); ck_assert(comp != NULL); ck_assert(gcs_comp_msg_add (comp, LOCALHOST, 0) >= 0); ck_assert(gcs_comp_msg_add (comp, REMOTEHOST,1) >= 0); ret = new_component (&group, comp); ck_assert(ret >= 0); ck_assert(gcs_group_is_primary(&group)); ck_assert(group.act_id_ + 1 == seqno); ck_assert(GCS_NODE_STATE_JOINED == group.nodes[1].status); ck_assert(GCS_NODE_STATE_JOINED == group.nodes[0].status); // 13.3 now I just continue sending messages TRY_MESSAGE(msg2); ck_assert_msg(ret == 0, "%zd (%s)", ret, strerror(-ret)); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); msg_write (&msg2, &frg2, buf2, buf_len, frag2, frag2_len, 1,GCS_MSG_ACTION); TRY_MESSAGE(msg2); ck_assert(ret == 0); ck_assert(act->buf_len == 0); ck_assert(act->buf == NULL); // local message - action must be resent gu_debug("\n\nLocal message 3"); TRY_MESSAGE(msg3); ck_assert(ret == act_len); ck_assert(act->buf_len == act_len); ck_assert(act->buf != NULL); ck_assert(r_act.sender_idx == 0); ck_assert(act->type == GCS_ACT_WRITESET); 
ck_assert_msg(!strncmp((const char*)act->buf, act_buf, act_len), "Action received: '%s', expected '%s'", static_cast(act->buf), act_buf); ck_assert_msg(r_act.id == -ERESTART, "Expected seqno %d, found %" PRId64, -ERESTART, r_act.id); // cleanup free ((void*)act->buf); r_act = gcs_act_rcvd(); // foreign message - action must be dropped (ignored) gu_debug("\n\nForeign message 3"); msg_write (&msg3, &frg3, buf3, buf_len, frag3, frag3_len, 1,GCS_MSG_ACTION); TRY_MESSAGE(msg3); ck_assert_msg(ret == 0, "Expected ret 0, got %zd", ret); ck_assert_msg(act->buf_len == 0, "Expected buf_len 0, got %zd", act->buf_len); ck_assert(act->buf == NULL); ck_assert_msg(r_act.sender_idx == -1, "Expected action sender -1, got %d", r_act.sender_idx); ck_assert(act->type == GCS_ACT_ERROR); ck_assert_msg(r_act.id == GCS_SEQNO_ILL, "Expected seqno %" PRId64 ", found %" PRId64, GCS_SEQNO_ILL, r_act.id); ck_assert(group.act_id_ + 1 == seqno); // cleanup free ((void*)act->buf); r_act = gcs_act_rcvd(); // Leave group gcs_comp_msg_delete (comp); comp = gcs_comp_msg_new (FALSE, false, -1, 0, 0); ck_assert(comp != NULL); ret = new_component (&group, comp); ck_assert(ret >= 0); gcs_comp_msg_delete (comp); } END_TEST // This tests last applied functionality static void test_last_applied(int const gcs_proto_ver) { // Create 4-node component gt_group gt(4, gcs_proto_ver, true); // group object of the 0th node gcs_group_t& group(*gt.nodes[0]->group()); // 0, 0, 0, 0 ck_assert_msg(group.last_applied == 0, "expected last_applied = 0, got %" PRId64, group.last_applied); gt.deliver_last_applied (0, 11); // 11, 0, 0, 0 ck_assert_msg(group.last_applied == 0, "expected last_applied = 0, got %" PRId64, group.last_applied); gt.deliver_last_applied (1, 12); // 11, 12, 0, 0 ck_assert(group.last_applied == 0); gt.deliver_last_applied (2, 13); // 11, 12, 13, 0 ck_assert(group.last_applied == 0); gt.deliver_last_applied (3, 14); // 11, 12, 13, 14 assert(group.last_applied == 11); ck_assert(group.last_applied == 11); 
// now must be 11 gt.deliver_last_applied (1, 16); // 11, 16, 13, 14 ck_assert(group.last_applied == 11); // now must still be 11 gt.deliver_last_applied (0, 17); // 17, 16, 13, 14 ck_assert(group.last_applied == 13); // now must be 13 gt.deliver_last_applied (3, 18); // 17, 16, 13, 18 ck_assert(group.last_applied == 13); // must still be 13 /* * remove the lagging node */ struct gt_node* gn(gt.drop_node(2)); ck_assert(gn != NULL); delete gn; // 17, 16, 18 // With GCS protocol 2 and above we use conservative group wide value from // the previous PC (13) as opposed to the minimal individual value (16) gcs_seqno_t expect(gcs_proto_ver < 2 ? 16 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 "\n" "Nodes: %ld; last_applieds: " "%" PRId64 ", %" PRId64 " , %" PRId64, expect, group.last_applied, group.num, group.nodes[0].last_applied, group.nodes[1].last_applied, group.nodes[2].last_applied); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(13 == group.nodes[1].last_applied, "expected 13, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); } /* Advance lagging node */ gt.deliver_last_applied (1, 17); // 17, 17, 18 expect = (gcs_proto_ver < 2 ? 
17 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(17 == group.nodes[1].last_applied, "expected 13, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); } /* * add a new node and sync immediately */ ck_assert(0 == gt.add_node(new gt_node(DISTANTHOST"1", gcs_proto_ver),true)); ck_assert(0 == gt.sync_node(gt.nodes_num - 1)); // 17, 17, 18, 0 (v0-1) / 13, 17, 13, 0 (v2-) // With GCS protocol 2 and above last_applied can't go down. expect = (gcs_proto_ver < 2 ? 0 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(13 == group.nodes[1].last_applied, // back to conservative "expected 13, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(0 == group.nodes[3].last_applied, "expected 0, got %" PRId64, group.nodes[3].last_applied); } gt.deliver_last_applied (0, 18); gt.deliver_last_applied (2, 19); // 18, 17, 19, 0 (v0-1) / 13, 13, 13, 0 (v2-) expect = (gcs_proto_ver < 2 ? 0 : 13); // still keeping conservative cut 13 ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); /* remove last node to add unsynced */ gn = gt.drop_node(3); ck_assert(gn != NULL); delete gn; expect = (gcs_proto_ver < 2 ? 
17 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); /* * add a new node but don't sync yet */ ck_assert(0 == gt.add_node(new gt_node(DISTANTHOST"2", gcs_proto_ver),true)); // 18, 17, 19, 0 (v0-1) / 13, 13, 13, 0 (v2-) expect = (gcs_proto_ver < 2 ? 17 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(13 == group.nodes[1].last_applied, "expected 13, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(0 == group.nodes[3].last_applied, "expected 0, got %" PRId64, group.nodes[3].last_applied); } /* Advance lagging node */ gt.deliver_last_applied (1, 18); // 18, 18, 19, 0 (v0-1) / 13, 18, 13, 0 (v2-) expect = (gcs_proto_ver < 2 ? 18 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(18 == group.nodes[1].last_applied, "expected 18, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(0 == group.nodes[3].last_applied, "expected 0, got %" PRId64, group.nodes[3].last_applied); } /* Advance non-synced node (should have no effect) */ gt.deliver_last_applied (3, 14); // 18, 18, 19, 14 (v0-1) / 13, 18, 13, 14 (v2-) expect = (gcs_proto_ver < 2 ? 
18 : 13); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(13 == group.nodes[0].last_applied, "expected 13, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(18 == group.nodes[1].last_applied, "expected 18, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(13 == group.nodes[2].last_applied, "expected 13, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(14 == group.nodes[3].last_applied, "expected 14, got %" PRId64, group.nodes[3].last_applied); } /* Advance nodes 0 and 2 - this should advance global commit cut */ gt.deliver_last_applied (0, 20); gt.deliver_last_applied (2, 20); // 20, 18, 20, 14 (v0-1) / 20, 18, 20, 14 (v2-) expect = (gcs_proto_ver < 2 ? 18 : 18); ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(20 == group.nodes[0].last_applied, "expected 20, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(18 == group.nodes[1].last_applied, "expected 18, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(20 == group.nodes[2].last_applied, "expected 20, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(14 == group.nodes[3].last_applied, "expected 14, got %" PRId64, group.nodes[3].last_applied); } /* Sync node 3 - this shoud have no change */ ck_assert(0 == gt.sync_node(gt.nodes_num - 1)); expect = (gcs_proto_ver < 2 ? 
14 : 18); // v0-1 shall decrease commit cut ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); /* Advance nodes 0, 1, 2 - under v5 it should not advance global commit cut*/ gt.deliver_last_applied (0, 21); gt.deliver_last_applied (1, 21); gt.deliver_last_applied (2, 21); // 21, 21, 21, 14 switch (gcs_proto_ver) { case 0: case 1: expect = 14; break; case 2: case 3: case 4: expect = 21; break; // codership/galera-bugs#1003: case 5: expect = 18; break; // don't advance commit cut if any SYNCED node is behind default: ck_assert_msg(false, "Unaccounted for gcs_proto_ver %d", gcs_proto_ver); } ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(21 == group.nodes[0].last_applied, "expected 21, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(21 == group.nodes[1].last_applied, "expected 21, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(21 == group.nodes[2].last_applied, "expected 21, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(14 == group.nodes[3].last_applied, "expected 14, got %" PRId64, group.nodes[3].last_applied); } /* Lagging synced node advances a bit, below global commit cut */ gt.deliver_last_applied (3, 15); // 21, 21, 21, 15 switch (gcs_proto_ver) { case 0: case 1: expect = 15; break; case 2: case 3: case 4: expect = 21; break; // codership/galera-bugs#1003 case 5: expect = 18; break; // don't advance commit cut if any SYNCED node is behind default: ck_assert_msg(false, "Unaccounted for gcs_proto_ver %d", gcs_proto_ver); } ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(21 == group.nodes[0].last_applied, "expected 21, got %" PRId64, group.nodes[0].last_applied); 
ck_assert_msg(21 == group.nodes[1].last_applied, "expected 21, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(21 == group.nodes[2].last_applied, "expected 21, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(15 == group.nodes[3].last_applied, "expected 15, got %" PRId64, group.nodes[3].last_applied); } /* Lagging synced node catches up wth global commit cut */ gt.deliver_last_applied (3, 22); // 21, 21, 21, 22 expect = 21; ck_assert_msg(group.last_applied == expect, "Expected %" PRId64 ", got %" PRId64 ", proto: %d\n", expect, group.last_applied, gcs_proto_ver); if (gcs_proto_ver >= 2) { ck_assert_msg(21 == group.nodes[0].last_applied, "expected 21, got %" PRId64, group.nodes[0].last_applied); ck_assert_msg(21 == group.nodes[1].last_applied, "expected 21, got %" PRId64, group.nodes[1].last_applied); ck_assert_msg(21 == group.nodes[2].last_applied, "expected 21, got %" PRId64, group.nodes[2].last_applied); ck_assert_msg(22 == group.nodes[3].last_applied, "expected 22, got %" PRId64, group.nodes[3].last_applied); } } START_TEST(gcs_group_last_applied_v0) { test_last_applied(0); } END_TEST START_TEST(gcs_group_last_applied_v1) { test_last_applied(1); } END_TEST START_TEST(gcs_group_last_applied_v2) { test_last_applied(2); } END_TEST START_TEST(gcs_group_last_applied_v3) { test_last_applied(3); } END_TEST START_TEST(gcs_group_last_applied_v4) { test_last_applied(4); } END_TEST START_TEST(gcs_group_last_applied_v5) { test_last_applied(5); } END_TEST // Test that setting stateless flag works static void test_stateless_flag(bool const f) { gu::Config cnf; gcs_group::register_params(cnf); cnf.set(GCS_STATELESS_KEY, f ? 
"true" : "false"); gcs_group_t group(cnf, NULL, "", "", 5, 11, 7); ck_assert(f == group.stateless); gcs_comp_msg_t* const msg(gcs_comp_msg_new(true, false, 0, 1, 0)); ck_assert(nullptr != msg); int const m(gcs_comp_msg_add(msg, LOCALHOST, 0)); ck_assert(0 == m); gcs_group_state_t const ret(new_component(&group, msg)); ck_assert(ret >= 0); ck_assert(f == group.nodes[0].stateless); gcs_comp_msg_delete(msg); } START_TEST(test_stateless_flag_false) { test_stateless_flag(false); } END_TEST START_TEST(test_stateless_flag_true) { test_stateless_flag(true); } END_TEST // Test donor selection algorithm based on // - smallest cached seqno (to avoid SST) // - segment affinity // - stateless flag static void test_gcs_group_find_donor(bool const a) { gu::Config cnf; gcs_group::register_params(cnf); cnf.set(GCS_STATELESS_KEY, a ? "true" : "false"); gcs_group_t group(cnf, NULL, "", "", 0, 0, 0); ck_assert(a == group.stateless); const char* s_group_uuid = "0d0d0d0d-0d0d-0d0d-0d0d-0d0d0d0d0d0d"; gu_uuid_scan(s_group_uuid, strlen(s_group_uuid), &group.group_uuid); // seven nodes // idx name segment seqno // 0th home0 0 90 // 1th home1 0 95 // 2th home2 0 105 // 3th home3 0(joiner)100 // 4th home4 1 90 // 5th home5 1 95 // 6th home6 1 105 const int number = 7; group_nodes_free(&group); group.nodes = (gcs_node_t*)malloc(sizeof(gcs_node_t) * number); group.num = number; const gcs_seqno_t seqnos[] = {90, 95, 105, 100, 90, 95, 105}; gcs_node_t* nodes = group.nodes; const int joiner = 3; const int arbitr = 0; for(int i = 0; i < number; i++) { uint8_t const vp(gcs_group_conf_to_vote_policy(cnf)); char name[32]; snprintf(name, sizeof(name), "home%d", i); bool const stateless(arbitr == i && group.stateless); gcs_node_init(&nodes[i], NULL, name, name, "", 0, 0, 0, i > joiner ? 1 : 0, stateless); uint8_t const flags(nodes[i].stateless ? 
GCS_STATE_FSTATELESS : 0); ck_assert(stateless == (flags != 0)); nodes[i].status = GCS_NODE_STATE_SYNCED; nodes[i].state_msg = gcs_state_msg_create( &GU_UUID_NIL, &GU_UUID_NIL, &GU_UUID_NIL, 0, 0, seqnos[i], 0, GCS_SEQNO_ILL, 0, vp, 0, GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED, "", "", 0, 0, 0, 0, 0, 0, 0, flags); } ck_assert(nodes[arbitr].stateless == a); group.quorum.version = 3; // minimum quorum version for stateless flag // to have effect group.quorum.act_id = 0; // in safe range. ck_assert(group.quorum.gcs_proto_ver == -1); ck_assert(group.gcs_proto_ver == 0); int donor = -1; const int sv = 2; // str version. #define SARGS(s) s, strlen(s) //========== sst ========== gu::GTID const empty_gtid; donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home3"), empty_gtid); ck_assert(donor == -EHOSTDOWN); donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home1,home2"), empty_gtid); ck_assert(donor == 1); nodes[1].status = GCS_NODE_STATE_JOINER; donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home1,home2"), empty_gtid); ck_assert(donor == 2); nodes[1].status = GCS_NODE_STATE_SYNCED; // handle dangling comma. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home3,"), empty_gtid); int expect_donor(a ? 1 : arbitr); ck_assert_msg(donor == expect_donor, "stateless: %d, expected donor: %d, result donor: %d", a, expect_donor, donor); // ========== ist ========== // by name. gu::GTID const group_gtid(group.group_uuid, 100); donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home0,home1,home2"), group_gtid); ck_assert(donor == 1); group.quorum.act_id = 1498; // not in safe range. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_gtid); ck_assert(donor == 2); group.quorum.act_id = 1497; // in safe range. in segment. donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_gtid); ck_assert(donor == 1); group.quorum.act_id = 1497; // in safe range. cross segment. 
nodes[0].status = GCS_NODE_STATE_JOINER; nodes[1].status = GCS_NODE_STATE_JOINER; nodes[2].status = GCS_NODE_STATE_JOINER; donor = gcs_group_find_donor(&group, sv, joiner, SARGS("home2"), group_gtid); ck_assert(donor == 5); nodes[0].status = GCS_NODE_STATE_SYNCED; nodes[1].status = GCS_NODE_STATE_SYNCED; nodes[2].status = GCS_NODE_STATE_SYNCED; #undef SARGS } START_TEST(test_gcs_group_find_donor_stateful) { test_gcs_group_find_donor(false); } END_TEST START_TEST(test_gcs_group_find_donor_stateless) { test_gcs_group_find_donor(true); } END_TEST Suite *gcs_group_suite(void) { Suite *suite = suite_create("GCS group context"); TCase *tcase = tcase_create("gcs_group"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_group_configuration); tcase_add_test (tcase, gcs_group_last_applied_v0); tcase_add_test (tcase, gcs_group_last_applied_v1); tcase_add_test (tcase, gcs_group_last_applied_v2); tcase_add_test (tcase, gcs_group_last_applied_v3); tcase_add_test (tcase, gcs_group_last_applied_v4); tcase_add_test (tcase, gcs_group_last_applied_v5); tcase_add_test (tcase, test_stateless_flag_false); tcase_add_test (tcase, test_stateless_flag_true); tcase_add_test (tcase, test_gcs_group_find_donor_stateful); tcase_add_test (tcase, test_gcs_group_find_donor_stateless); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_node_test.cpp000644 000164 177776 00000003250 15107057155 023604 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ */ #include "../gcs_node.hpp" #include "gcs_node_test.hpp" // must be included last #define NODE_ID "owpiefd[woie" #define NODE_NAME "strange name" #define NODE_ADDR "0.0.0.0:0" START_TEST (gcs_node_test) { /* this is a small unit test as node unit does almost nothing */ gcs_node_t node1, node2; static const gcs_seqno_t seqno = 333; gcs_node_init (&node1, NULL, NODE_ID, NODE_NAME, NODE_ADDR, 0, 0, 0, 0, false); gcs_node_init (&node2, NULL, "baka", NULL, NULL, 0, 0, 0, 0, true); 
ck_assert_msg(!strcmp(node1.id, NODE_ID), "Expected node id '%s', " "found '%s'", NODE_ID, node1.id); ck_assert_msg(!strcmp(node1.name, NODE_NAME), "Expected node name '%s', " "found '%s'", NODE_NAME, node1.name); ck_assert_msg(!strcmp(node1.inc_addr, NODE_ADDR), "Expected node id '%s', " "found '%s'", NODE_ADDR, node1.inc_addr); ck_assert(!gcs_node_get_last_applied(&node1)); gcs_node_set_last_applied (&node1, seqno); mark_point(); gcs_node_move (&node2, &node1); ck_assert_msg(seqno == gcs_node_get_last_applied (&node2), "move didn't preserve last_applied"); ck_assert_msg(!strcmp(node2.id, NODE_ID), "Expected node id '%s', found '%s'", NODE_ID, node2.id); gcs_node_reset (&node1); mark_point(); gcs_node_free (&node2); } END_TEST Suite *gcs_node_suite(void) { Suite *suite = suite_create("GCS node context"); TCase *tcase = tcase_create("gcs_node"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_node_test); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_node_test.hpp000644 000164 177776 00000000334 15107057155 023611 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_node_test__ #define __gcs_node_test__ #include extern Suite *gcs_node_suite(void); #endif /* __gu_node_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_backend_test.cpp000644 000164 177776 00000004707 15107057155 024256 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #include #include "../gcs_backend.hpp" #include "gcs_backend_test.hpp" // must be included last // Fake backend definitons. 
Must be global for gcs_backend.c to see GCS_BACKEND_NAME_FN(gcs_test_name) { return "DUMMIEEEE!"; } GCS_BACKEND_CREATE_FN(gcs_test_create) { backend->name = gcs_test_name; return 0; } GCS_BACKEND_NAME_FN(gcs_spread_name) { return "SPREAT"; } GCS_BACKEND_CREATE_FN(gcs_spread_create) { backend->name = gcs_spread_name; return 0; } GCS_BACKEND_NAME_FN(gcs_vs_name) { return "vsssssssss"; } GCS_BACKEND_CREATE_FN(gcs_vs_create) { backend->name = gcs_vs_name; return 0; } GCS_BACKEND_NAME_FN(gcs_gcomm_name) { return "gCOMMMMM!!!"; } GCS_BACKEND_CREATE_FN(gcs_gcomm_create) { backend->name = gcs_gcomm_name; return 0; } START_TEST (gcs_backend_test) { gcs_backend_t backend; long ret; gu_config_t* config = gu_config_create (); ck_assert(config != NULL); ret = gcs_backend_init (&backend, "wrong://kkk", config); ck_assert(ret == -ESOCKTNOSUPPORT); ret = gcs_backend_init (&backend, "spread:", config); ck_assert(ret == -EINVAL); ret = gcs_backend_init (&backend, "dummy://", config); ck_assert_msg(ret == 0, "ret = %ld (%s)", ret, strerror(-ret)); backend.destroy(&backend); // ck_assert(backend.name == gcs_test_name); this test is broken since we can // no longer use global gcs_dummy_create() symbol because linking with real // gcs_dummy.o ret = gcs_backend_init (&backend, "gcomm://0.0.0.0:4567", config); #ifdef GCS_USE_GCOMM ck_assert_msg(ret == 0, "ret = %d (%s)", ret, strerror(-ret)); ck_assert(backend.name == gcs_gcomm_name); backend.destroy(&backend); #else ck_assert(ret == -ESOCKTNOSUPPORT); #endif // ret = gcs_backend_init (&backend, "vsbes://kkk"); // ck_assert_msg(ret == 0, "ret = %d (%s)", ret, strerror(-ret)); // ck_assert(backend.name == gcs_vs_name); // ret = gcs_backend_init (&backend, "spread://"); // ck_assert_msg(ret == 0, "ret = %d (%s)", ret, strerror(-ret)); // ck_assert(backend.name == gcs_spread_name); gu_config_destroy(config); } END_TEST Suite *gcs_backend_suite(void) { Suite *suite = suite_create("GCS backend interface"); TCase *tcase = 
tcase_create("gcs_backend"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_backend_test); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_state_msg_test.hpp000644 000164 177776 00000000341 15107057155 024650 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2015 Codership Oy // $Id$ #ifndef __gcs_state_msg_test__ #define __gcs_state_msg_test__ #include Suite *gcs_state_msg_suite(void); #endif /* __gcs_state_msg_test__ */ galera-4-26.4.25/gcs/src/unit_tests/CMakeLists.txt000644 000164 177776 00000002333 15107057155 023021 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_executable(gcs_tests gcs_tests.cpp gcs_test_utils.cpp gcs_act_cchange_test.cpp ../gcs_act_cchange.cpp gcs_fifo_test.cpp ../gcs_fifo_lite.cpp gcs_sm_test.cpp ../gcs_sm.cpp gcs_comp_test.cpp ../gcs_comp_msg.cpp gcs_state_msg_test.cpp ../gcs_state_msg.cpp gcs_backend_test.cpp ../gcs_backend.cpp gcs_proto_test.cpp ../gcs_act_proto.cpp gcs_defrag_test.cpp ../gcs_defrag.cpp gcs_node_test.cpp ../gcs_node.cpp gcs_group_test.cpp gcs_memb_test.cpp ../gcs_group.cpp gcs_core_test.cpp ../gcs_code_msg.cpp ../gcs_core.cpp ../gcs_dummy.cpp ../gcs_msg_type.cpp ../gcs.cpp ../gcs_params.cpp gcs_fc_test.cpp ../gcs_fc.cpp ../gcs_error.cpp ) target_compile_definitions(gcs_tests PRIVATE -DGALERA_LOG_H_ENABLE_CXX -DGCS_CORE_TESTING -DGCS_DUMMY_TESTING ) if (GALERA_NONDETERMINISTIC_TESTS) target_compile_definitions(gcs_tests PRIVATE -DGCS_ALLOW_GH74 ) endif() # TODO: Fix. 
target_compile_options(gcs_tests PRIVATE -Wno-conversion -Wno-unused-parameter -Wno-vla ) target_link_libraries(gcs_tests gcache ${GALERA_UNIT_TEST_LIBS} ) add_test( NAME gcs_tests COMMAND gcs_tests ) galera-4-26.4.25/gcs/src/unit_tests/gcs_tests.cpp000644 000164 177776 00000004363 15107057155 022770 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2017 Codership Oy * * $Id$ */ #include // printf() #include // strcmp() #include // EXIT_SUCCESS | EXIT_FAILURE #include #include #include "gcs_comp_test.hpp" #include "gcs_sm_test.hpp" #include "gcs_state_msg_test.hpp" #include "gcs_fifo_test.hpp" #include "gcs_proto_test.hpp" #include "gcs_defrag_test.hpp" #include "gcs_node_test.hpp" #include "gcs_memb_test.hpp" #include "gcs_act_cchange_test.hpp" #include "gcs_group_test.hpp" #include "gcs_backend_test.hpp" #include "gcs_core_test.hpp" #include "gcs_fc_test.hpp" typedef Suite *(*suite_creator_t)(void); static suite_creator_t suites[] = { gcs_comp_suite, gcs_send_monitor_suite, gcs_state_msg_suite, gcs_fifo_suite, gcs_proto_suite, gcs_defrag_suite, gcs_node_suite, gcs_memb_suite, gcs_act_cchange_suite, gcs_group_suite, gcs_backend_suite, gcs_core_suite, gcs_fc_suite, NULL }; #define LOG_FILE "gcs_tests.log" int main(int argc, char* argv[]) { bool const nofork(((argc > 1) && !strcmp(argv[1], "nofork")) ? 
true : false); int i = 0; int failed = 0; FILE* log_file = NULL; if (!nofork) { log_file = fopen (LOG_FILE, "w"); if (!log_file) return EXIT_FAILURE; gu_conf_set_log_file (log_file); } gu_conf_debug_on(); gu_conf_self_tstamp_on(); while (suites[i]) { SRunner* sr = srunner_create(suites[i]()); gu_info ("#########################"); gu_info ("Test %d.", i); gu_info ("#########################"); if (nofork) srunner_set_fork_status(sr, CK_NOFORK); srunner_run_all (sr, CK_NORMAL); failed += srunner_ntests_failed (sr); srunner_free (sr); i++; } if (log_file) fclose (log_file); printf ("Total test failed: %d\n", failed); if (0 == failed && 0 != log_file) ::unlink(LOG_FILE); return (failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; } /* When the suite compiled in debug mode, returns number of allocated bytes */ ssize_t gcs_tests_get_allocated() { ssize_t total; ssize_t allocs; ssize_t reallocs; ssize_t deallocs; void gu_mem_stats (ssize_t*, ssize_t*, ssize_t*, ssize_t*); gu_mem_stats (&total, &allocs, &reallocs, &deallocs); return total; } galera-4-26.4.25/gcs/src/unit_tests/gcs_act_cchange_test.hpp000644 000164 177776 00000000344 15107057155 025104 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2015 Codership Oy // $Id$ #ifndef __gcs_act_cchange_test__ #define __gcs_act_cchange_test__ #include Suite *gcs_act_cchange_suite(void); #endif /* __gcs_act_cchange_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_fifo_test.hpp000644 000164 177776 00000000315 15107057155 023606 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2015 Codership Oy // $Id$ #ifndef __gcs_fifo_test__ #define __gcs_fifo_test__ #include Suite *gcs_fifo_suite(void); #endif /* __gcs_fifo_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_fc_test.hpp000644 000164 177776 00000000300 15107057155 023245 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010 Codership Oy // $Id$ #ifndef __gcs_fc_test__ #define __gcs_fc_test__ #include Suite *gcs_fc_suite(void); #endif /* __gcs_fc_test__ */ 
galera-4-26.4.25/gcs/src/unit_tests/gcs_fc_test.cpp000644 000164 177776 00000010205 15107057155 023245 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2020 Codership Oy // $Id$ #include "../gcs_fc.hpp" #include "gcs_fc_test.hpp" // must be included last START_TEST(gcs_fc_test_limits) { gcs_fc_t fc; int ret; ret = gcs_fc_init (&fc, 16, 0.5, 0.1); ck_assert(ret == 0); ret = gcs_fc_init (&fc, -1, 0.5, 0.1); ck_assert(ret == -EINVAL); ret = gcs_fc_init (&fc, 16, 1.0, 0.1); ck_assert(ret == -EINVAL); ret = gcs_fc_init (&fc, 16, 0.5, 1.0); ck_assert(ret == -EINVAL); } END_TEST /* This is a macro to preserve line numbers in ck_assert_msg() output */ #define SKIP_N_ACTIONS(fc_,n_) \ { \ int i; \ for (i = 0; i < n_; ++i) \ { \ long long ret = gcs_fc_process (fc_, 0); \ ck_assert_msg(ret == 0, "0-sized action #%d returned %lld (%s)", \ i, ret, strerror(-ret)); \ } \ } START_TEST(gcs_fc_test_basic) { gcs_fc_t fc; int ret; long long pause; ret = gcs_fc_init (&fc, 16, 0.5, 0.1); ck_assert(ret == 0); gcs_fc_reset (&fc, 8); usleep (1000); SKIP_N_ACTIONS(&fc, 7); /* Here we exceed soft limit almost instantly, which should give a very high * data rate and as a result a need to sleep */ pause = gcs_fc_process (&fc, 7); ck_assert_msg(pause > 0, "Soft limit trip returned %lld (%s)", pause, strerror(-pause)); gcs_fc_reset (&fc, 7); usleep (1000); SKIP_N_ACTIONS(&fc, 7); /* Here we reach soft limit almost instantly, which should give a very high * data rate, but soft limit is not exceeded, so no sleep yet. 
*/ pause = gcs_fc_process (&fc, 1); ck_assert_msg(pause == 0, "Soft limit touch returned %lld (%s)", pause, strerror(-pause)); SKIP_N_ACTIONS(&fc, 7); usleep (1000); pause = gcs_fc_process (&fc, 7); ck_assert_msg(pause > 0, "Soft limit trip returned %lld (%s)", pause, strerror(-pause)); /* hard limit excess should be detected instantly */ pause = gcs_fc_process (&fc, 1); ck_assert_msg(pause == -ENOMEM, "Hard limit trip returned %lld (%s)", pause, strerror(-pause)); } END_TEST static inline bool double_equals (double a, double b) { static double const eps = 0.001; double diff = (a - b) / (a + b); // roughly relative difference return !(diff > eps || diff < -eps); } START_TEST(gcs_fc_test_precise) { gcs_fc_t fc; long long ret; struct timespec p10ms = {0, 10000000 }; // 10 ms ret = gcs_fc_init (&fc, 2000, 0.5, 0.5); ck_assert(ret == 0); gcs_fc_reset (&fc, 500); SKIP_N_ACTIONS(&fc, 7); nanosleep (&p10ms, NULL); ret = gcs_fc_process (&fc, 1000); ck_assert_msg(ret > 0, "Soft limit trip returned %lld (%s)", ret, strerror(-ret)); // measured data rate should be ~100000 b/s // slave queue length should be half-way between soft limit and hard limit // desired rate should be half between 1.0 and 0.5 of full rate -> 75000 b/s // excess over soft limit is 500 and corresponding interval: 5ms // (500/5ms == 100000 b/s) // additional sleep must be 1.6667 ms (500/(5 + 1.6667) ~ 75000 b/s) double const correction = 100000.0/fc.max_rate; // due to imprecise sleep double const expected_sleep = 0.001666667*correction; double sleep = ((double)ret)*1.0e-9; ck_assert_msg(double_equals(sleep, expected_sleep), "Sleep: %f, expected %f", sleep, expected_sleep); } END_TEST Suite *gcs_fc_suite(void) { Suite *s = suite_create("GCS state transfer FC"); TCase *tc = tcase_create("gcs_fc"); suite_add_tcase (s, tc); tcase_add_test (tc, gcs_fc_test_limits); tcase_add_test (tc, gcs_fc_test_basic); tcase_add_test (tc, gcs_fc_test_precise); return s; } 
galera-4-26.4.25/gcs/src/unit_tests/gcs_proto_test.cpp000644 000164 177776 00000010005 15107057155 024016 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #include "../gcs_act_proto.hpp" #include "gcs_proto_test.hpp" // must be included last static long frgcmp (gcs_act_frag_t* f1, gcs_act_frag_t* f2) { if ( (f1->act_id == f2->act_id) && (f1->act_size == f2->act_size) && (f1->act_type == f2->act_type) && (f1->frag_len == f2->frag_len) && // expect to point (f1->frag == f2->frag) // at the same buffer here ) return 0; else return -1; } START_TEST (gcs_proto_test) { const char act_send[] = "Test action smuction"; const char* act_send_ptr = act_send; char act_recv[] = "owoeijrvfokpvfcsdnfvkmk;l"; char* act_recv_ptr = act_recv; const size_t buf_len = 32; char buf[buf_len]; gcs_act_frag_t frg_send, frg_recv; long ret; frg_send.act_id = getpid(); frg_send.act_size = strlen (act_send); frg_send.frag = NULL; frg_send.frag_len = 0; frg_send.frag_no = 0; frg_send.act_type = (gcs_act_type_t)0; frg_send.proto_ver = 0; // set up action header ret = gcs_act_proto_write (&frg_send, buf, buf_len); ck_assert_msg(0 == ret, "error code: %ld", ret); ck_assert(frg_send.frag != NULL); ck_assert(frg_send.frag_len != 0); ck_assert_msg(strlen(act_send) >= frg_send.frag_len, "Expected fragmentation, but action seems to fit in buffer" " - increase send action length"); // write action to the buffer, it should not fit strncpy ((char*)frg_send.frag, act_send_ptr, frg_send.frag_len); act_send_ptr += frg_send.frag_len; // message was sent and received, now parse the header ret = gcs_act_proto_read (&frg_recv, buf, buf_len); ck_assert_msg(0 == ret, "error code: %ld", ret); ck_assert(frg_recv.frag != NULL); ck_assert(frg_recv.frag_len != 0); ck_assert_msg(!frgcmp(&frg_send, &frg_recv), "Sent and recvd headers are not identical"); ck_assert_msg(frg_send.frag_no == frg_recv.frag_no, "Fragment numbers are not identical: %lu %lu", frg_send.frag_no, 
frg_recv.frag_no); // read the fragment into receiving action buffer // FIXME: this works by sheer luck - only because strncpy() pads // the remaining buffer space with 0 strncpy (act_recv_ptr, (const char*)frg_recv.frag, frg_recv.frag_len); act_recv_ptr += frg_recv.frag_len; // send the second fragment. Increment the fragment counter gcs_act_proto_inc (buf); // should be 1 now // write action to the buffer, it should fit now strncpy ((char*)frg_send.frag, act_send_ptr, frg_send.frag_len); // act_send_ptr += frg_send.frag_len; // message was sent and received, now parse the header ret = gcs_act_proto_read (&frg_recv, buf, buf_len); ck_assert_msg(0 == ret, "error code: %ld", ret); ck_assert_msg(!frgcmp(&frg_send, &frg_recv), "Sent and recvd headers are not identical"); ck_assert_msg(frg_send.frag_no + 1 == frg_recv.frag_no, "Fragment numbers are not sequential: %lu %lu", frg_send.frag_no, frg_recv.frag_no); // read the fragment into receiving action buffer // FIXME: this works by sheer luck - only because strncpy() pads // the remaining buffer space with 0 strncpy (act_recv_ptr, (const char*)frg_recv.frag, frg_recv.frag_len); ck_assert_msg(strlen(act_recv_ptr) < frg_send.frag_len, "Fragment does not seem to fit in buffer: '%s'(%zu)", act_recv_ptr, strlen(act_recv_ptr)); // check that actions are identical ck_assert_msg(!strcmp(act_send, act_recv), "Actions don't match: '%s' -- '%s'", act_send, act_recv); } END_TEST Suite *gcs_proto_suite(void) { Suite *suite = suite_create("GCS core protocol"); TCase *tcase = tcase_create("gcs_proto"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_proto_test); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_memb_test.cpp000644 000164 177776 00000007717 15107057155 023613 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011-2020 Codership Oy * * $Id$ */ #include "gcs_test_utils.hpp" #include "../gcs_group.hpp" #include "../gcs_comp_msg.hpp" #include "gu_uuid.h" #include "gcs_memb_test.hpp" // must be 
included last using namespace gcs_test; /* Thes test was specifically created to reproduce #465 */ START_TEST(gcs_memb_test_465) { struct gt_group group; ck_assert(group.nodes_num == 0); struct gt_node nodes[GT_MAX_NODES]; int i; ssize_t ret = 0; // initialize individual node structures for (i = 0; i < GT_MAX_NODES; i++) { int const str_len = 32; char name_str[str_len]; char addr_str[str_len]; sprintf(name_str, "node%d", i); sprintf(addr_str, "addr%d", i); nodes[i].group.init(name_str, addr_str, 1, 0, 0); } gcs_node_state_t node_state; // bootstrap the cluster group.add_node(&nodes[0], true); ck_assert(nodes[0].group.state() == GCS_GROUP_PRIMARY); node_state = nodes[0].state(); ck_assert(node_state == GCS_NODE_STATE_JOINED); group.deliver_join_sync_msg(0, GCS_MSG_SYNC); node_state = nodes[0].state(); ck_assert(node_state == GCS_NODE_STATE_SYNCED); group.add_node(&nodes[1], true); ck_assert(nodes[1].group.state() == GCS_GROUP_PRIMARY); node_state = nodes[1].state(); ck_assert(node_state == GCS_NODE_STATE_PRIM); // need sst group.add_node(&nodes[2], true); ck_assert(nodes[2].group.state() == GCS_GROUP_PRIMARY); node_state = nodes[2].state(); ck_assert(node_state == GCS_NODE_STATE_PRIM); // need sst ck_assert(group.verify_node_state_across(0, GCS_NODE_STATE_SYNCED)); group.sst_start(2, nodes[0].group()->nodes[0].name); mark_point(); group.deliver_join_sync_msg(0, GCS_MSG_JOIN); // end of donor SST group.deliver_join_sync_msg(0, GCS_MSG_SYNC); // donor synced group.deliver_join_sync_msg(2, GCS_MSG_SYNC); // joiner can't sync ck_assert(group.verify_node_state_across(2, GCS_NODE_STATE_JOINER)); group.deliver_join_sync_msg(2, GCS_MSG_JOIN); // end of joiner SST group.deliver_join_sync_msg(2, GCS_MSG_SYNC); // joiner synced ck_assert(group.verify_node_state_across(0, GCS_NODE_STATE_SYNCED)); ck_assert(group.verify_node_state_across(1, GCS_NODE_STATE_PRIM)); ck_assert(group.verify_node_state_across(2, GCS_NODE_STATE_SYNCED)); group.sst_start(1, 
nodes[0].group()->nodes[0].name); group.deliver_join_sync_msg(0, GCS_MSG_JOIN); // end of donor SST group.deliver_join_sync_msg(1, GCS_MSG_JOIN); // end of joiner SST struct gt_node* dropped = group.drop_node(1); ck_assert(NULL != dropped); /* After that, according to #465, node 1 shifted from SYNCED to PRIMARY */ ck_assert(group.verify_node_state_across(1, GCS_NODE_STATE_SYNCED)); struct gcs_act_rcvd rcvd; int proto_ver = -1; ret = gcs_group_act_conf(group.nodes[1]->group(), &rcvd, &proto_ver); struct gcs_act* const act(&rcvd.act); ck_assert_msg(ret > 0, "gcs_group_act_cnf() retruned %zd (%s)", ret, strerror (-ret)); ck_assert(ret == act->buf_len); ck_assert_msg(proto_ver == 1 /* current version */, "proto_ver = %d", proto_ver); const gcs_act_cchange conf(act->buf, act->buf_len); int const my_idx(rcvd.id); ck_assert(my_idx == 1); group.deliver_join_sync_msg(0, GCS_MSG_SYNC); // donor synced ck_assert(group.verify_node_state_across(0, GCS_NODE_STATE_SYNCED)); group.nodes[1]->group.gcache()->free(const_cast(act->buf)); while (group.nodes_num) { struct gt_node* dropped = group.drop_node(0); ck_assert(NULL != dropped); } ck_assert(0 == group.nodes_num); } END_TEST Suite *gcs_memb_suite(void) { Suite *suite = suite_create("GCS membership changes"); TCase *tcase = tcase_create("gcs_memb"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_memb_test_465); tcase_set_timeout(tcase, 30); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_comp_test.hpp000644 000164 177776 00000000334 15107057155 023622 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_comp_test__ #define __gcs_comp_test__ #include extern Suite *gcs_comp_suite(void); #endif /* __gu_comp_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_state_msg_test.cpp000644 000164 177776 00000111654 15107057155 024655 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #define GCS_STATE_MSG_ACCESS #include 
"../gcs_state_msg.hpp" #include "gcs_state_msg_test.hpp" // must be included last #include "gu_inttypes.hpp" static int const QUORUM_VERSION = 6; START_TEST (gcs_state_msg_test_basic) { ssize_t send_len, ret; gu_uuid_t state_uuid; gu_uuid_t group_uuid; gu_uuid_t prim_uuid; gcs_state_msg_t* send_state; gcs_state_msg_t* recv_state; gu_uuid_generate (&state_uuid, NULL, 0); gu_uuid_generate (&group_uuid, NULL, 0); gu_uuid_generate (&prim_uuid, NULL, 0); gcs_seqno_t const prim_seqno(457); gcs_seqno_t const received(3456); gcs_seqno_t const cached(2345); gcs_seqno_t const last_applied(3450); gcs_seqno_t const vote_seqno(3449); int64_t const vote_res(0x1244567879012345ULL); int const vote_policy(0); int const prim_joined(5); gcs_node_state_t const prim_state(GCS_NODE_STATE_JOINED); gcs_node_state_t const current_state(GCS_NODE_STATE_NON_PRIM); const char* const name("MyName"); const char* const inc_addr("192.168.0.1:2345"); int const gcs_proto_ver(2); int const repl_proto_ver(1); int const appl_proto_ver(2); int const prim_gcs_ver(0); int const prim_repl_ver(1); int const prim_appl_ver(1); int const desync_count(0); int const flags(GCS_STATE_FREP); send_state = gcs_state_msg_create (&state_uuid, &group_uuid, &prim_uuid, prim_seqno, received, // last received seqno cached, // last cached seqno last_applied, // last applied vote_seqno, // last vote seqno vote_res, // last vote result vote_policy, // voting protocol prim_joined, // prim_joined prim_state, // prim_state current_state, // current_state name, // name inc_addr, // inc_addr gcs_proto_ver, // gcs_proto_ver repl_proto_ver, // repl_proto_ver appl_proto_ver, // appl_proto_ver prim_gcs_ver, // prim_gcs_ver prim_repl_ver, // prim_repl_ver prim_appl_ver, // prim_appl_ver desync_count, // desync_count flags // flags ); ck_assert(NULL != send_state); ck_assert(send_state->flags == flags); ck_assert(send_state->gcs_proto_ver == gcs_proto_ver); ck_assert(send_state->repl_proto_ver == repl_proto_ver); 
ck_assert(send_state->appl_proto_ver == appl_proto_ver); ck_assert_msg(send_state->received == received, "Last received seqno: sent %" PRId64 ", recv %" PRId64, send_state->received, received); ck_assert_msg(send_state->cached == cached, "Last cached seqno: sent %" PRId64 ", recv %" PRId64, send_state->cached, cached); ck_assert(send_state->prim_seqno == prim_seqno); ck_assert(send_state->current_state == current_state); ck_assert(send_state->prim_state == prim_state); ck_assert(send_state->prim_joined == prim_joined); ck_assert(!gu_uuid_compare (&send_state->state_uuid, &state_uuid)); ck_assert(!gu_uuid_compare (&send_state->group_uuid, &group_uuid)); ck_assert(!gu_uuid_compare (&send_state->prim_uuid, &prim_uuid)); ck_assert(!strcmp(send_state->name, name)); ck_assert(!strcmp(send_state->inc_addr, inc_addr)); { size_t str_len = 1024; char send_str[str_len]; ck_assert(gcs_state_msg_snprintf(send_str, str_len, send_state) > 0); } //v1-2 ck_assert(send_state->appl_proto_ver == appl_proto_ver); //v3 ck_assert(send_state->cached == cached); //v4 ck_assert(send_state->desync_count == desync_count); //v5 ck_assert(send_state->last_applied == last_applied); ck_assert(send_state->vote_seqno == vote_seqno); ck_assert(send_state->vote_res == vote_res); ck_assert(send_state->vote_policy == vote_policy); send_len = gcs_state_msg_len (send_state); ck_assert_msg(send_len >= 0, "gcs_state_msg_len() returned %zd (%s)", send_len, strerror (-send_len)); { uint8_t send_buf[send_len]; ret = gcs_state_msg_write (send_buf, send_state); ck_assert_msg(ret == send_len, "Return value does not match send_len: " "expected %zd, got %zd", send_len, ret); recv_state = gcs_state_msg_read (send_buf, send_len); ck_assert(NULL != recv_state); } ck_assert(send_state->flags == recv_state->flags); ck_assert(send_state->gcs_proto_ver == recv_state->gcs_proto_ver); ck_assert(send_state->repl_proto_ver == recv_state->repl_proto_ver); ck_assert_msg(recv_state->repl_proto_ver == 1, "repl_proto_ver: %d", 
recv_state->repl_proto_ver); ck_assert(send_state->appl_proto_ver == recv_state->appl_proto_ver); ck_assert_msg(recv_state->appl_proto_ver == 2, "appl_proto_ver: %d", recv_state->appl_proto_ver); ck_assert(send_state->prim_gcs_ver == recv_state->prim_gcs_ver); ck_assert_msg(recv_state->prim_gcs_ver == 0, "prim_gcs_ver: %d", recv_state->prim_appl_ver); ck_assert(send_state->prim_repl_ver == recv_state->prim_repl_ver); ck_assert_msg(recv_state->prim_repl_ver == 1, "prim_repl_ver: %d", recv_state->prim_appl_ver); ck_assert(send_state->prim_appl_ver == recv_state->prim_appl_ver); ck_assert_msg(recv_state->prim_appl_ver == 1, "prim_appl_ver: %d", recv_state->prim_appl_ver); ck_assert_msg(send_state->received == recv_state->received, "Last received seqno: sent %" PRId64 " , recv %" PRId64, send_state->received, recv_state->received); ck_assert_msg(send_state->cached == recv_state->cached, "Last cached seqno: sent %" PRId64 ", recv %" PRId64, send_state->cached, recv_state->cached); ck_assert(send_state->prim_seqno == recv_state->prim_seqno); ck_assert(send_state->current_state == recv_state->current_state); ck_assert(send_state->prim_state == recv_state->prim_state); ck_assert(send_state->prim_joined == recv_state->prim_joined); ck_assert(!gu_uuid_compare(&recv_state->state_uuid, &state_uuid)); ck_assert(!gu_uuid_compare(&recv_state->group_uuid, &group_uuid)); ck_assert(!gu_uuid_compare(&recv_state->prim_uuid, &prim_uuid)); ck_assert(!strcmp(send_state->name, recv_state->name)); ck_assert(!strcmp(send_state->inc_addr, recv_state->inc_addr)); { size_t str_len = 1024; char str[str_len]; ck_assert(gcs_state_msg_snprintf(str, str_len, send_state) > 0); ck_assert(gcs_state_msg_snprintf(str, str_len, recv_state) > 0); } //v1-2 ck_assert(send_state->appl_proto_ver == recv_state->appl_proto_ver); //v3 ck_assert(send_state->cached == recv_state->cached); //v4 ck_assert(send_state->desync_count == recv_state->desync_count); //v5 ck_assert(send_state->last_applied == 
recv_state->last_applied);
    ck_assert(send_state->vote_seqno == recv_state->vote_seqno);
    ck_assert(send_state->vote_res == recv_state->vote_res);
    ck_assert(send_state->vote_policy == recv_state->vote_policy);

    gcs_state_msg_destroy (send_state);
    gcs_state_msg_destroy (recv_state);
}
END_TEST

/* Checks quorum resolution when merging nodes carry two different group
 * UUIDs ("inheritance" of the primary component). Phases (see gu_info tags):
 * 1. no node was ever JOINED        -> no quorum;
 * 2. group1 gets a JOINED node      -> group1 wins;
 * 3. both groups have JOINED nodes  -> conflicting UUIDs, no quorum;
 * 4. group1 JOINED node demoted     -> group2 wins;
 * 5. group2 gets a SYNCED node      -> it becomes the representative. */
START_TEST (gcs_state_msg_test_quorum_inherit)
{
    gcs_state_msg_t* st[3] = { NULL, };

    gu_uuid_t state_uuid;
    gu_uuid_t group1_uuid, group2_uuid;
    gu_uuid_t prim1_uuid, prim2_uuid;

    gu_uuid_generate (&state_uuid, NULL, 0);
    gu_uuid_generate (&group1_uuid, NULL, 0);
    gu_uuid_generate (&group2_uuid, NULL, 0);
    gu_uuid_generate (&prim1_uuid, NULL, 0);
    gu_uuid_generate (&prim2_uuid, NULL, 0);

    gcs_seqno_t prim1_seqno = 123;
    gcs_seqno_t prim2_seqno = 834;
    gcs_seqno_t act1_seqno = 345;
    gcs_seqno_t act2_seqno = 239472508908LL;

    gcs_state_quorum_t quorum;

    mark_point();

    /* First just nodes from different groups and configurations, none JOINED */
    st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno - 1, act2_seqno - 1,
                                  act2_seqno - 1, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM,
                                  "node0", "", 0, 1, 1, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[0]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno - 1, act1_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);
    st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno, act2_seqno,
                                  act2_seqno - 2, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM,
                                  "node2", "", 0, 1, 1, 0, 0, 0, 0, 1);
    ck_assert(NULL != st[2]);

    gu_info (" Inherited 1");
    /* no JOINED node anywhere: expect non-primary, all fields invalid */
    int ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                        sizeof(st)/sizeof(gcs_state_msg_t*),
                                        &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(false == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL));
    ck_assert(GCS_SEQNO_ILL == quorum.act_id);
    ck_assert(GCS_SEQNO_ILL == quorum.conf_id);
    ck_assert(-1 == quorum.gcs_proto_ver);
    ck_assert(-1 == quorum.repl_proto_ver);
    ck_assert(-1 == quorum.appl_proto_ver);

    /* now make node1 inherit PC */
    gcs_state_msg_destroy (st[1]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno - 3, act1_seqno - 2,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_JOINED, GCS_NODE_STATE_DONOR,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);

    gu_info (" Inherited 2");
    /* group1 now has a JOINED node: expect group1 to form the quorum */
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group1_uuid));
    ck_assert(act1_seqno == quorum.act_id);
    ck_assert(prim1_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    /* now make node0 inherit PC (should yield conflicting uuids) */
    gcs_state_msg_destroy (st[0]);
    st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno - 1, act2_seqno - 1,
                                  -1, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED,
                                  "node0", "", 0, 1, 1, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[0]);

    gu_info (" Inherited 3");
    /* both groups have joined nodes now: conflict, expect no quorum */
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(false == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL));
    ck_assert(GCS_SEQNO_ILL == quorum.act_id);
    ck_assert(GCS_SEQNO_ILL == quorum.conf_id);
    ck_assert(-1 == quorum.gcs_proto_ver);
    ck_assert(-1 == quorum.repl_proto_ver);
    ck_assert(-1 == quorum.appl_proto_ver);

    /* now make node1 non-joined again: group2 should win */
    gcs_state_msg_destroy (st[1]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno - 3, act1_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_JOINED, GCS_NODE_STATE_PRIM,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);

    gu_info (" Inherited 4");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group2_uuid));
    ck_assert(act2_seqno - 1 == quorum.act_id);
    ck_assert(prim2_seqno - 1 == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    /* now make node2 joined: it should become a representative */
    gcs_state_msg_destroy (st[2]);
    st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno, act2_seqno,
                                  act2_seqno - 2, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_SYNCED,
                                  "node2", "", 0, 1, 1, 0, 1, 0, 0, 0);
    ck_assert(NULL != st[2]);

    gu_info (" Inherited 5");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group2_uuid));
    ck_assert(act2_seqno == quorum.act_id);
    ck_assert(prim2_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    gcs_state_msg_destroy (st[0]);
    gcs_state_msg_destroy (st[1]);
    gcs_state_msg_destroy (st[2]);
}
END_TEST

/* Checks quorum resolution when previously partitioned components of the
 * same cluster re-merge (all nodes NON_PRIM); see phase comments below. */
START_TEST (gcs_state_msg_test_quorum_remerge)
{
    gcs_state_msg_t* st[3] = { NULL, };

    gu_uuid_t state_uuid;
    gu_uuid_t group1_uuid, group2_uuid;
    gu_uuid_t
prim0_uuid, prim1_uuid, prim2_uuid;

    gu_uuid_generate (&state_uuid, NULL, 0);
    gu_uuid_generate (&group1_uuid, NULL, 0);
    gu_uuid_generate (&group2_uuid, NULL, 0);
    gu_uuid_generate (&prim0_uuid, NULL, 0);
    gu_uuid_generate (&prim1_uuid, NULL, 0);
    gu_uuid_generate (&prim2_uuid, NULL, 0);

    gcs_seqno_t prim1_seqno = 123;
    gcs_seqno_t prim2_seqno = 834;
    gcs_seqno_t act1_seqno = 345;
    gcs_seqno_t act2_seqno = 239472508908LL;

    gcs_state_quorum_t quorum;

    mark_point();

    /* First just nodes from different groups and configurations, none JOINED */
    st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim0_uuid,
                                  prim2_seqno - 1, act2_seqno - 1,
                                  act2_seqno - 2, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_JOINER, GCS_NODE_STATE_NON_PRIM,
                                  "node0", "", 0, 1, 1, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[0]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno - 3, act1_seqno - 2,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_JOINER, GCS_NODE_STATE_NON_PRIM,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);
    st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno, act2_seqno,
                                  -1, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_JOINER, GCS_NODE_STATE_NON_PRIM,
                                  "node2", "", 0, 1, 1, 0, 0, 0, 0, 1);
    ck_assert(NULL != st[2]);

    gu_info (" Remerged 1");
    /* all nodes are mere JOINERs: expect no quorum */
    int ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                        sizeof(st)/sizeof(gcs_state_msg_t*),
                                        &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(false == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL));
    ck_assert(GCS_SEQNO_ILL == quorum.act_id);
    ck_assert(GCS_SEQNO_ILL == quorum.conf_id);
    ck_assert(-1 == quorum.gcs_proto_ver);
    ck_assert(-1 == quorum.repl_proto_ver);
    ck_assert(-1 == quorum.appl_proto_ver);

    /* Now make node0 to be joined at least once */
    gcs_state_msg_destroy (st[0]);
    st[0] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim0_uuid,
                                  prim2_seqno - 1, act2_seqno - 1,
                                  -1, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_DONOR, GCS_NODE_STATE_NON_PRIM,
                                  "node0", "", 0, 1, 1, 0, 0, 0, 3, 0);
    ck_assert(NULL != st[0]);
    ck_assert(3 == gcs_state_msg_get_desync_count(st[0]));

    gu_info (" Remerged 2");
    /* node0 (group2, DONOR) can now represent its group: expect group2 quorum
     * at node0's (older) seqnos */
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group2_uuid));
    ck_assert(act2_seqno - 1 == quorum.act_id);
    ck_assert(prim2_seqno - 1 == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    /* Now make node2 to be joined too */
    gcs_state_msg_destroy (st[2]);
    st[2] = gcs_state_msg_create (&state_uuid, &group2_uuid, &prim2_uuid,
                                  prim2_seqno, act2_seqno,
                                  act2_seqno - 3, act2_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 5,
                                  GCS_NODE_STATE_JOINED, GCS_NODE_STATE_NON_PRIM,
                                  "node2", "", 0, 1, 1, 0, 0, 0, 0, 1);
    ck_assert(NULL != st[2]);

    gu_info (" Remerged 3");
    /* node2 carries more recent seqnos: expect quorum to advance to them */
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group2_uuid));
    ck_assert(act2_seqno == quorum.act_id);
    ck_assert(prim2_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    /* now make node1 joined too: conflict */
    gcs_state_msg_destroy (st[1]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno, act1_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);

    gu_info (" Remerged 4");
    /* joined nodes from two different group UUIDs: expect no quorum */
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(false == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &GU_UUID_NIL));
    ck_assert(GCS_SEQNO_ILL == quorum.act_id);
    ck_assert(GCS_SEQNO_ILL == quorum.conf_id);
    ck_assert(-1 == quorum.gcs_proto_ver);
    ck_assert(-1 == quorum.repl_proto_ver);
    ck_assert(-1 == quorum.appl_proto_ver);

    /* now make node1 current joiner: should be ignored */
    gcs_state_msg_destroy (st[1]);
    st[1] = gcs_state_msg_create (&state_uuid, &group1_uuid, &prim1_uuid,
                                  prim1_seqno, act1_seqno,
                                  act1_seqno - 2, act1_seqno - 1,
                                  GCS_SEQNO_ILL, 0, GCS_VOTE_ZERO_WINS, 3,
                                  GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_JOINER,
                                  "node1", "", 0, 1, 0, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);

    gu_info (" Remerged 5");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                    sizeof(st)/sizeof(gcs_state_msg_t*),
                                    &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group2_uuid));
    ck_assert(act2_seqno == quorum.act_id);
    ck_assert(prim2_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(0 == quorum.appl_proto_ver);

    gcs_state_msg_destroy (st[0]);
    gcs_state_msg_destroy (st[1]);
    gcs_state_msg_destroy (st[2]);
}
END_TEST

/* Quorum/vote-policy selection among seven nodes coming from two different
 * primary configurations (conf_id 35 with policy vp2 vs conf_id 37 with
 * policy vp1); parameterized by GCS protocol version.
 * NOTE(review): "gh24" presumably refers to an issue tracker entry — not
 * visible from this file. */
void gcs_state_msg_test_gh24(int const gcs_proto_ver)
{
    gcs_state_msg_t* st[7] = { NULL, };

    gu_uuid_t state_uuid, group_uuid;
    gu_uuid_generate(&state_uuid, NULL, 0);
    gu_uuid_generate(&group_uuid, NULL, 0);

    gu_uuid_t prim_uuid1, prim_uuid2;
    gu_uuid_generate(&prim_uuid1, NULL, 0);
    gu_uuid_generate(&prim_uuid2, NULL, 0);

    gcs_seqno_t const prim_seqno1 = 37;
    int const prim_joined1 = 3;
    uint8_t const vp1(0);

    gcs_seqno_t const prim_seqno2 = 35;
    int const prim_joined2 = 6;
    uint8_t const vp2(2);

    gcs_seqno_t const received = prim_seqno2;
    gcs_seqno_t const cached = 0;

    gcs_state_quorum_t quorum;

    /* Seven state messages: st[0..2] from prim configuration 35 (vote policy
     * vp2), st[3..6] from prim configuration 37 (vote policy vp1), with
     * varying last-applied seqnos. */
    // first three are 35.
    st[0] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2,
                                 prim_seqno2, received, cached, received - 7,
                                 GCS_SEQNO_ILL, 0, vp2, prim_joined2,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home0", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[0] != 0);
    ck_assert(gcs_state_msg_vote_policy(st[0]) == vp2);
    st[1] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2,
                                 prim_seqno2, received, cached, received - 11,
                                 GCS_SEQNO_ILL, 0, vp2, prim_joined2,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home1", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[1] != 0);
    st[2] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid2,
                                 prim_seqno2, received, cached, received - 5,
                                 GCS_SEQNO_ILL, 0, vp2, prim_joined2,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home2", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[2] != 0);
    // last four are 37.
    st[3] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1,
                                 prim_seqno1, received, cached, received - 8,
                                 GCS_SEQNO_ILL, 0, vp1, prim_joined1,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home3", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 3);
    ck_assert(st[3] != 0);
    ck_assert(gcs_state_msg_vote_policy(st[3]) == vp1);
    st[4] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1,
                                 prim_seqno1, received, cached, received - 3,
                                 GCS_SEQNO_ILL, 0, vp1, prim_joined1,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home4", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[4] != 0);
    st[5] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1,
                                 prim_seqno1, received, cached, received - 10,
                                 GCS_SEQNO_ILL, 0, vp1, prim_joined1,
                                 GCS_NODE_STATE_SYNCED, GCS_NODE_STATE_NON_PRIM,
                                 "home5", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[5] != 0);
    st[6] = gcs_state_msg_create(&state_uuid, &group_uuid, &prim_uuid1,
                                 prim_seqno1, received, cached, received - 13,
                                 GCS_SEQNO_ILL, 0, vp1, prim_joined1,
                                 GCS_NODE_STATE_PRIM, GCS_NODE_STATE_NON_PRIM,
                                 "home6", "", gcs_proto_ver, 4, 2,
                                 0, 0, 0, 0, 2);
    ck_assert(st[6] != 0);

    int ret = gcs_state_msg_get_quorum((const gcs_state_msg_t**)st, 7, &quorum);
    ck_assert(ret == 0);
    ck_assert(quorum.primary == true);
    /* quorum is expected from the prim_seqno1 (37) configuration */
    ck_assert(quorum.conf_id == prim_seqno1);

    /* Expected vote policy depends on GCS protocol version: with v0 it is
     * GCS_VOTE_ZERO_WINS, with v1 it is taken from the winning
     * configuration (vp1). */
    switch (gcs_proto_ver)
    {
    case 0:
        ck_assert_msg(quorum.vote_policy == GCS_VOTE_ZERO_WINS,
                      "found policy %d, expected %d",
                      quorum.vote_policy, GCS_VOTE_ZERO_WINS);
        break;
    case 1:
        ck_assert_msg(quorum.vote_policy == vp1,
                      "found policy %d, expected %d",
                      quorum.vote_policy, vp1);
        break;
    default:
        ck_abort_msg("unsupported GCS protocol: %d", gcs_proto_ver);
    }

    for (int i = 0; i < 7; i++) gcs_state_msg_destroy(st[i]);
}

START_TEST(gcs_state_msg_test_gh24_0) // also tests vote policy switch
{
    gcs_state_msg_test_gh24(0);
}
END_TEST

START_TEST(gcs_state_msg_test_gh24_1) // also tests vote policy switch
{
    gcs_state_msg_test_gh24(1);
}
END_TEST

/* This test is to test that protocol downgrade is disabled with state
 * exchange >= v6 */
static void gcs_state_msg_test_v6_upgrade(int const from_ver)
{
    gcs_state_msg_t* st[3] = { NULL, };

    gu_uuid_t state_uuid;
    gu_uuid_t group_uuid;
    gu_uuid_t prim_uuid;

    gu_uuid_generate (&state_uuid, NULL, 0);
    gu_uuid_generate (&group_uuid, NULL, 0);
    gu_uuid_generate (&prim_uuid, NULL, 0);

    gcs_seqno_t prim_seqno = 123;
    gcs_seqno_t act_seqno = 345;

    gcs_state_quorum_t quorum;

    mark_point();

    /* Start with "heterogeneous" PC, where node2 is a v4 node */
    st[0] = gcs_state_msg_create (&state_uuid, &group_uuid, &prim_uuid,
                                  prim_seqno - 1, act_seqno - 1,
                                  act_seqno - 1, 0, 0, 0, 0, 3,
                                  GCS_NODE_STATE_PRIM, GCS_NODE_STATE_PRIM,
                                  "node0", "", 4, 4, 4, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[0]);
    st[1] = gcs_state_msg_create (&state_uuid, &group_uuid, &prim_uuid,
                                  prim_seqno, act_seqno,
                                  act_seqno - 3, 0, 0, 0, 0, 3,
                                  GCS_NODE_STATE_JOINED, GCS_NODE_STATE_JOINED,
                                  "node1", "", 3, 3, 3, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[1]);
    st[2] = gcs_state_msg_create (&state_uuid, &group_uuid, &prim_uuid,
                                  prim_seqno, act_seqno,
                                  act_seqno - 3, 0, 0, 0, 0, 3,
                                  GCS_NODE_STATE_JOINED, GCS_NODE_STATE_JOINED,
                                  "node2", "", 0, 1, 1, 0, 0, 0, 0, 0);
    ck_assert(NULL != st[2]);
    st[2]->version = from_ver; /* emulate an older state message version */

    gu_info (" proto_ver I");
    /* heterogeneous PC: protocol versions are held down by node2 */
    int ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st,
                                        sizeof(st)/sizeof(gcs_state_msg_t*),
                                        &quorum);
    ck_assert(0 == ret);
    ck_assert(from_ver == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group_uuid));
    ck_assert(act_seqno == quorum.act_id);
    ck_assert(prim_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(1 == quorum.appl_proto_ver);
    ck_assert(GCS_VOTE_ZERO_WINS == quorum.vote_policy);

/* Refresh a node's state message fields to match the last established
 * quorum (used between the reconfiguration phases below). */
#define UPDATE_STATE_MSG(x)                          \
    st[x]->prim_seqno    = prim_seqno;               \
    st[x]->received      = act_seqno;                \
    st[x]->prim_gcs_ver  = quorum.gcs_proto_ver;     \
    st[x]->prim_repl_ver = quorum.repl_proto_ver;    \
    st[x]->prim_appl_ver = quorum.appl_proto_ver;

    /* disconnect node2: protocol versions should go up (also bump seqnos) */
    prim_seqno++; act_seqno++;
    UPDATE_STATE_MSG(0);
    UPDATE_STATE_MSG(1);
    gu_info (" proto_ver II");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, 2, &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group_uuid));
    ck_assert(act_seqno == quorum.act_id);
    ck_assert(prim_seqno == quorum.conf_id);
    ck_assert(3 == quorum.gcs_proto_ver);
    ck_assert(3 == quorum.repl_proto_ver);
    ck_assert(3 == quorum.appl_proto_ver);
    ck_assert(0 == quorum.vote_policy);

    /* reconnect node2: protocol versions should go down for backward
     * compatibility */
    prim_seqno++; act_seqno++;
    UPDATE_STATE_MSG(0);
    UPDATE_STATE_MSG(1);
    gu_info (" proto_ver III");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, 3, &quorum);
    ck_assert(0 == ret);
    ck_assert(from_ver == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group_uuid));
    ck_assert(act_seqno == quorum.act_id);
    ck_assert(prim_seqno == quorum.conf_id);
    ck_assert(0 == quorum.gcs_proto_ver);
    ck_assert(1 == quorum.repl_proto_ver);
    ck_assert(1 == quorum.appl_proto_ver);
    ck_assert(GCS_VOTE_ZERO_WINS == quorum.vote_policy);

    /* disconnect node2 */
    prim_seqno++; act_seqno++;
    UPDATE_STATE_MSG(0);
    UPDATE_STATE_MSG(1);
    gu_info (" proto_ver IV");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, 2, &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group_uuid));
    ck_assert(act_seqno == quorum.act_id);
    ck_assert(prim_seqno == quorum.conf_id);
    ck_assert(3 == quorum.gcs_proto_ver);
    ck_assert(3 == quorum.repl_proto_ver);
    ck_assert(3 == quorum.appl_proto_ver);
    ck_assert(0 == quorum.vote_policy);

    /* upgrade node2 */
    st[2]->version = QUORUM_VERSION;
    st[2]->gcs_proto_ver = 2;
    st[2]->repl_proto_ver = 2;
    st[2]->appl_proto_ver = 2;

    /* reconnect node2: this time protocol versions should stay */
    prim_seqno++; act_seqno++;
    UPDATE_STATE_MSG(0);
    UPDATE_STATE_MSG(1);
    gu_info (" proto_ver V");
    ret = gcs_state_msg_get_quorum ((const gcs_state_msg_t**)st, 3, &quorum);
    ck_assert(0 == ret);
    ck_assert(QUORUM_VERSION == quorum.version);
    ck_assert(true == quorum.primary);
    ck_assert(0 == gu_uuid_compare(&quorum.group_uuid, &group_uuid));
    ck_assert(act_seqno == quorum.act_id);
    ck_assert(prim_seqno == quorum.conf_id);
    ck_assert(3 == quorum.gcs_proto_ver);
    ck_assert(3 == quorum.repl_proto_ver);
    ck_assert(3 == quorum.appl_proto_ver);
    ck_assert(0 == quorum.vote_policy);

    gcs_state_msg_destroy (st[0]);
    gcs_state_msg_destroy (st[1]);
    gcs_state_msg_destroy (st[2]);

#undef UPDATE_STATE_MSG
}

START_TEST (gcs_state_msg_test_v4v6_upgrade)
{
    gcs_state_msg_test_v6_upgrade(4);
}
END_TEST

START_TEST (gcs_state_msg_test_v5v6_upgrade)
{
    gcs_state_msg_test_v6_upgrade(5);
}
END_TEST

/* Assembles all state message test cases into a single Check suite. */
Suite *gcs_state_msg_suite(void)
{
    Suite
*s = suite_create("GCS state message"); TCase *tc_basic = tcase_create("gcs_state_msg_basic"); TCase *tc_inherit = tcase_create("gcs_state_msg_inherit"); TCase *tc_remerge = tcase_create("gcs_state_msg_remerge"); TCase *tc_proto_ver = tcase_create("gcs_state_msg_proto_ver"); suite_add_tcase (s, tc_basic); tcase_add_test (tc_basic, gcs_state_msg_test_basic); suite_add_tcase (s, tc_inherit); tcase_add_test (tc_inherit, gcs_state_msg_test_quorum_inherit); suite_add_tcase (s, tc_remerge); tcase_add_test (tc_remerge, gcs_state_msg_test_quorum_remerge); tcase_add_test (tc_remerge, gcs_state_msg_test_gh24_0); tcase_add_test (tc_remerge, gcs_state_msg_test_gh24_1); suite_add_tcase (s, tc_proto_ver); tcase_add_test (tc_proto_ver, gcs_state_msg_test_v4v6_upgrade); tcase_add_test (tc_proto_ver, gcs_state_msg_test_v5v6_upgrade); return s; } galera-4-26.4.25/gcs/src/unit_tests/gcs_act_cchange_test.cpp000644 000164 177776 00000005212 15107057155 025076 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2015 Codership Oy // $Id$ #include "../gcs.hpp" #include "gu_uuid.hpp" #include "gu_utils.hpp" #include // for ::free() #include "gcs_act_cchange_test.hpp" // must be included last START_TEST (zero_cc) { gcs_act_cchange const cc; ck_assert(cc.uuid == GU_UUID_NIL); ck_assert(cc.seqno == GCS_SEQNO_ILL); ck_assert(cc.conf_id == -1); ck_assert(cc.memb.size() == 0); ck_assert(cc.repl_proto_ver == -1); ck_assert(cc.appl_proto_ver == -1); } END_TEST START_TEST (serialization) { gcs_act_cchange cc_src; void* buf(NULL); int size(cc_src.write(&buf)); ck_assert(NULL != buf); ck_assert(size > 0); { gcs_act_cchange const cc_dst(buf, size); ck_assert(cc_dst == cc_src); } /* try buffer corruption, exception must be thrown */ try { static_cast(buf)[size/2] += 1; gcs_act_cchange const cc_dst(buf, size); ck_abort_msg("exception must be thrown"); } catch (gu::Exception& e) {} ::free(buf); cc_src.seqno = 1234567890; cc_src.conf_id = 234; cc_src.repl_proto_ver = 4; cc_src.appl_proto_ver = 5; for 
(int i(0); i < 128; ++i) // make really big cluster ;) { gcs_act_cchange::member m; gu_uuid_generate(&m.uuid_, &i, sizeof(i)); m.name_ = std::string("node") + gu::to_string(i); m.incoming_ = std::string("192.168.0.") + gu::to_string(i) + ":4567"; m.cached_ = i % 7 + 47; // some random number m.state_ = gcs_node_state(i % GCS_NODE_STATE_MAX); cc_src.memb.push_back(m); } size = cc_src.write(&buf); ck_assert(NULL != buf); ck_assert(size > 0); { gcs_act_cchange const cc_dst(buf, size); ck_assert(cc_dst == cc_src); ck_assert(cc_dst.seqno == cc_src.seqno); ck_assert(cc_dst.conf_id == cc_src.conf_id); ck_assert(cc_dst.memb.size() == cc_src.memb.size()); ck_assert(cc_dst.repl_proto_ver == cc_src.repl_proto_ver); ck_assert(cc_dst.appl_proto_ver == cc_src.appl_proto_ver); } /* another buffer corruption, exception must be thrown */ try { static_cast(buf)[size/2] += 1; gcs_act_cchange const cc_dst(buf, size); ck_abort_msg("exception must be thrown"); } catch (gu::Exception& e) {} ::free(buf); } END_TEST Suite *gcs_act_cchange_suite(void) { Suite *s = suite_create("CC functions"); TCase *tc = tcase_create("gcs_act_cchange"); suite_add_tcase (s, tc); tcase_add_test (tc, zero_cc); tcase_add_test (tc, serialization); return s; } galera-4-26.4.25/gcs/src/unit_tests/gcs_comp_test.cpp000644 000164 177776 00000011452 15107057155 023620 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #include #define GCS_COMP_MSG_ACCESS #include "../gcs_comp_msg.hpp" #include "gcs_comp_test.hpp" // must be included last static gcs_comp_memb_t const members[] = { { "0", 0 }, { "88888888", 1 }, { "1", 5 }, { "7777777", 1 }, { "22", 3 }, { "666666", 4 }, { "333", 5 }, { "55555", 5 }, { "4444", 0 } }; static char long_id[] = "just make it longer when the test starts to fail because of increased limit"; static void check_msg_identity (const gcs_comp_msg_t* m, const gcs_comp_msg_t* n) { long i; ck_assert(n->primary == m->primary); ck_assert(n->my_idx == m->my_idx); 
ck_assert(n->memb_num == m->memb_num); for (i = 0; i < m->memb_num; i++) { ck_assert_msg(strlen(n->memb[i].id) == strlen(m->memb[i].id), "member %ld id len does not match: %zu vs %zu", i, strlen(n->memb[i].id), strlen(m->memb[i].id)); ck_assert_msg(!strncmp(n->memb[i].id, m->memb[i].id, GCS_COMP_MEMB_ID_MAX_LEN), "member %ld IDs don't not match: got '%s', " "should be '%s'", i, members[i].id, m->memb[i].id); ck_assert_msg(n->memb[i].segment == m->memb[i].segment, "member %ld segments don't not match: got '%d', " "should be '%d'", i, (int)members[i].segment, (int)m->memb[i].segment); } } START_TEST (gcs_comp_test) { long memb_num = sizeof(members)/sizeof(members[0]); long my_idx = getpid() % memb_num; long prim = my_idx % 2; gcs_comp_msg_t* m = gcs_comp_msg_new (prim, false, my_idx, memb_num, 0); gcs_comp_msg_t* n = NULL; size_t buf_len = gcs_comp_msg_size (m); char buf[buf_len]; long i, j; long ret; ck_assert(NULL != m); ck_assert(memb_num == gcs_comp_msg_num (m)); ck_assert(my_idx == gcs_comp_msg_self (m)); // add members except for the last for (i = 0; i < memb_num - 1; i++) { ret = gcs_comp_msg_add (m, members[i].id, members[i].segment); ck_assert_msg(ret == i, "gcs_comp_msg_add() returned %ld, expected %ld", ret, i); } // try to add a id that was added already if (my_idx < i) { j = my_idx; } else { j = i - 1; } ret = gcs_comp_msg_add (m, members[j].id, members[j].segment); ck_assert_msg(ret == -ENOTUNIQ, "gcs_comp_msg_add() returned %ld, expected " "-ENOTUNIQ (%d)", ret, -ENOTUNIQ); // try to add empty id ret = gcs_comp_msg_add (m, "", 0); ck_assert_msg(ret == -EINVAL, "gcs_comp_msg_add() returned %ld, expected " "-EINVAL (%d)", ret, -EINVAL); // try to add id that is too long ret = gcs_comp_msg_add (m, long_id, 3); ck_assert_msg(ret == -ENAMETOOLONG, "gcs_comp_msg_add() returned %ld, expected " "-ENAMETOOLONG (%d)", ret, -ENAMETOOLONG); // add final id ret = gcs_comp_msg_add (m, members[i].id, members[i].segment); ck_assert_msg(ret == i, "gcs_comp_msg_add() 
returned %ld, expected %ld", ret, i); // check that all added correctly for (i = 0; i < memb_num; i++) { const char* const id = gcs_comp_msg_member(m, i)->id; ck_assert_msg(!strcmp(members[i].id, id), "Memeber %ld (%s) recorded as %s", i, members[i].id, id); } // check that memcpy preserves the message // (it can be treated just as a byte array) memcpy (buf, m, buf_len); n = (gcs_comp_msg_t*) buf; check_msg_identity (m, n); gcs_comp_msg_delete (m); mark_point(); // check that gcs_comp_msg_copy() works m = gcs_comp_msg_copy (n); ck_assert(NULL != m); check_msg_identity (m, n); gcs_comp_msg_delete (m); // test gcs_comp_msg_member() ck_assert(NULL == gcs_comp_msg_member (n, -1)); for (i = 0; i < memb_num; i++) { const char* id = gcs_comp_msg_member (n, i)->id; ck_assert(NULL != id); ck_assert(!strcmp(members[i].id, id)); } ck_assert(NULL == gcs_comp_msg_member (n, i)); // test gcs_comp_msg_idx() ck_assert(-1 == gcs_comp_msg_idx (n, "")); ck_assert(-1 == gcs_comp_msg_idx (n, long_id)); for (i = 0; i < memb_num; i++) ck_assert(i == gcs_comp_msg_idx (n, members[i].id)); // test gcs_comp_msg_primary() ck_assert(n->primary == gcs_comp_msg_primary(n)); } END_TEST Suite *gcs_comp_suite(void) { Suite *suite = suite_create("GCS component message"); TCase *tcase = tcase_create("gcs_comp"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_comp_test); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_defrag_test.cpp000644 000164 177776 00000012044 15107057155 024110 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2020 Codership Oy * * $Id$ */ #include "../gcs_defrag.hpp" #include "gcs_defrag_test.hpp" // must be included last #define TRUE (0 == 0) #define FALSE (!TRUE) static void defrag_check_init (gcs_defrag_t* defrag) { ck_assert(defrag->sent_id == GCS_SEQNO_ILL); ck_assert(defrag->head == NULL); ck_assert(defrag->tail == NULL); ck_assert(defrag->size == 0); ck_assert(defrag->received == 0); ck_assert(defrag->frag_no == 0); } START_TEST 
(gcs_defrag_test) { ssize_t ret; // The Action char act_buf[] = "Test action smuction"; size_t act_len = sizeof (act_buf); // lengths of three fragments of the action size_t frag1_len = act_len / 3; size_t frag2_len = frag1_len; size_t frag3_len = act_len - frag1_len - frag2_len; // pointer to the three fragments of the action const char* frag1 = act_buf; const char* frag2 = frag1 + frag1_len; const char* frag3 = frag2 + frag2_len; // recv fragments gcs_act_frag_t frg1, frg2, frg3, frg4; gcs_defrag_t defrag; struct gcs_act recv_act; void* tail; mark_point(); #ifndef NDEBUG // debug build breaks this test due to asserts return; #endif // Initialize message parameters frg1.act_id = getpid(); frg1.act_size = act_len; frg1.frag = frag1; frg1.frag_len = frag1_len; frg1.frag_no = 0; frg1.act_type = GCS_ACT_WRITESET; frg1.proto_ver = 0; // normal fragments frg2 = frg3 = frg1; frg2.frag = frag2; frg2.frag_len = frag2_len; frg2.frag_no = frg1.frag_no + 1; frg3.frag = frag3; frg3.frag_len = frag3_len; frg3.frag_no = frg2.frag_no + 1; // bad fragmets to be tried instead of frg2 frg4 = frg2; frg4.frag = "junk"; frg4.frag_len = strlen("junk"); frg4.act_id = frg2.act_id + 1; // wrong action id mark_point(); // ready for the first fragment gcs_defrag_init (&defrag, NULL); defrag_check_init (&defrag); mark_point(); // 1. Try fragment that is not the first ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, FALSE); ck_assert(ret == -EPROTO); mark_point(); defrag_check_init (&defrag); // should be no changes // 2. 
Try first fragment ret = gcs_defrag_handle_frag (&defrag, &frg1, &recv_act, FALSE); ck_assert(ret == 0); ck_assert(defrag.head != NULL); ck_assert(defrag.received == frag1_len); ck_assert(defrag.tail == defrag.head + defrag.received); tail = defrag.tail; #define TRY_WRONG_2ND_FRAGMENT(frag) \ ret = gcs_defrag_handle_frag (&defrag, &frag, &recv_act, FALSE); \ if (defrag.frag_no < frag.frag_no) ck_assert(ret == -EPROTO); \ else ck_assert(ret == 0); \ ck_assert(defrag.received == frag1_len); \ ck_assert(defrag.tail == tail); // 3. Try first fragment again TRY_WRONG_2ND_FRAGMENT(frg1); // 4. Try third fragment TRY_WRONG_2ND_FRAGMENT(frg3); // 5. Try fouth fragment TRY_WRONG_2ND_FRAGMENT(frg4); // 6. Try second fragment ret = gcs_defrag_handle_frag (&defrag, &frg2, &recv_act, FALSE); ck_assert(ret == 0); ck_assert(defrag.received == frag1_len + frag2_len); ck_assert(defrag.tail == defrag.head + defrag.received); // 7. Try third fragment, last one ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, FALSE); ck_assert(ret == (long)act_len); // 8. Check the action ck_assert(recv_act.buf != NULL); ck_assert(recv_act.buf_len == (long)act_len); ck_assert_msg(!strncmp((const char*)recv_act.buf, act_buf, act_len), "Action received: '%s', expected '%s'", static_cast(recv_act.buf) ,act_buf); defrag_check_init (&defrag); // should be empty gcs_gcache_free(defrag.cache, recv_act.buf); // 9. Try the same with local action ret = gcs_defrag_handle_frag (&defrag, &frg1, &recv_act, TRUE); ck_assert(ret == 0); // ck_assert(defrag.head == NULL); (and now we may allocate it for cache) ret = gcs_defrag_handle_frag (&defrag, &frg2, &recv_act, TRUE); ck_assert(ret == 0); // ck_assert(defrag.head == NULL); (and now we may allocate it for cache) ret = gcs_defrag_handle_frag (&defrag, &frg3, &recv_act, TRUE); ck_assert(ret == (long)act_len); // ck_assert(defrag.head == NULL); (and now we may allocate it for cache) // 10. 
Check the action ck_assert(recv_act.buf != NULL); ck_assert(recv_act.buf_len == (long)act_len); // ck_assert(recv_act.buf == NULL); (and now we may allocate it for cache) defrag_check_init (&defrag); // should be empty gcs_gcache_free(defrag.cache, recv_act.buf); } END_TEST Suite *gcs_defrag_suite(void) { Suite *suite = suite_create("GCS defragmenter"); TCase *tcase = tcase_create("gcs_defrag"); suite_add_tcase (suite, tcase); tcase_add_test (tcase, gcs_defrag_test); return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_sm_test.hpp000644 000164 177776 00000000317 15107057155 023304 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2015 Codership Oy // $Id$ #ifndef __gcs_sm_test__ #define __gcs_sm_test__ #include Suite *gcs_send_monitor_suite(void); #endif /* __gcs_sm_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_group_test.hpp000644 000164 177776 00000000340 15107057155 024015 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_group_test__ #define __gcs_group_test__ #include extern Suite *gcs_group_suite(void); #endif /* __gu_group_test__ */ galera-4-26.4.25/gcs/src/unit_tests/SConscript000644 000164 177776 00000006047 15107057155 022301 0ustar00jenkinsnogroup000000 000000 Import('check_env all_tests') env = check_env.Clone() # Include paths env.Append(CPPPATH = Split(''' #/common #/galerautils/src #/gcache/src #/gcs/src ''')) # For C-style logging env.Append(CPPFLAGS = ' -DGALERA_LOG_H_ENABLE_CXX -Wno-variadic-macros') # Disable old style cast warns until code is fixed env.Append(CPPFLAGS = ' -Wno-old-style-cast') # Allow zero sized arrays env.Replace(CCFLAGS = env['CCFLAGS'].replace('-pedantic', '')) env.Append(CPPFLAGS = ' -Wno-missing-field-initializers') env.Append(CPPFLAGS = ' -Wno-effc++') if all_tests: env.Append(CPPFLAGS = ' -DGCS_ALLOW_GH74') gcs_tests_sources = Split(''' gcs_tests.cpp gcs_test_utils.cpp gcs_fifo_test.cpp ../gcs_fifo_lite.cpp gcs_sm_test.cpp ../gcs_sm.cpp gcs_comp_test.cpp 
../gcs_comp_msg.cpp gcs_state_msg_test.cpp ../gcs_state_msg.cpp gcs_backend_test.cpp ../gcs_backend.cpp gcs_proto_test.cpp ../gcs_act_proto.cpp gcs_defrag_test.cpp ../gcs_defrag.cpp gcs_node_test.cpp ../gcs_node.cpp gcs_act_cchange_test.cpp ../gcs_act_cchange.cpp gcs_group_test.cpp gcs_memb_test.cpp ../gcs_code_msg.cpp ../gcs_group.cpp gcs_core_test.cpp ../gcs_core.cpp ../gcs_dummy.cpp ../gcs_msg_type.cpp ../gcs.cpp ../gcs_params.cpp gcs_fc_test.cpp ../gcs_fc.cpp ../gcs_error.cpp ''') #env.Append(CPPFLAGS = ' -DGCS_USE_GCOMM -DGCS_CORE_TESTING -DGCS_DUMMY_TESTING') env.Append(CPPFLAGS = ' -DGCS_CORE_TESTING -DGCS_DUMMY_TESTING') env.Append(LIBS = File('#/gcache/src/libgcache.a')) env.Append(LIBS = File('#/gcomm/src/libgcomm.a')) env.Append(LIBS = File('#/galerautils/src/libgalerautils++.a')) env.Append(LIBS = File('#/galerautils/src/libgalerautils.a')) env.Append(LIBS = ['m', 'ssl', 'crypto']) gcs_tests = env.Program(target = 'gcs_tests', source = gcs_tests_sources, OBJPREFIX = 'gcs-tests-', LINK = env['CXX']) env.Test("gcs_tests.passed", gcs_tests) env.Alias("test", "gcs_tests.passed") Clean(gcs_tests, '#/gcs_tests.log') galera-4-26.4.25/gcs/src/unit_tests/gcs_sm_test.cpp000644 000164 177776 00000042311 15107057155 023277 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2010-2020 Codership Oy // $Id$ #include "../gcs_sm.hpp" #include // fabs #include #include "gcs_sm_test.hpp" // must be included last #define TEST_USLEEP 10000 /* we can't use pthread functions for waiting for certain conditions */ #define WAIT_FOR(cond) \ { int count = 1000; while (--count && !(cond)) { usleep (1000); }} START_TEST (gcs_sm_test_basic) { int ret; gcs_sm_t* sm = gcs_sm_create(2, 1); ck_assert(sm != NULL); gu_cond_t cond; gu_cond_init (&cond, NULL); int i; for (i = 1; i < 5; i++) { ret = gcs_sm_enter(sm, &cond, false, true); ck_assert_msg(0 == ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); 
ck_assert_msg(sm->entered == 1, "entered = %ld, expected 1", sm->entered); gcs_sm_leave(sm); ck_assert_msg(sm->entered == 0, "entered = %ld, expected 0", sm->entered); } ret = gcs_sm_close(sm); ck_assert(0 == ret); gcs_sm_destroy(sm); gu_cond_destroy(&cond); } END_TEST volatile long simple_ret; static void* simple_thread(void* arg) { gcs_sm_t* sm = (gcs_sm_t*) arg; gu_cond_t cond; gu_cond_init (&cond, NULL); if (0 == (simple_ret = gcs_sm_enter (sm, &cond, false, true))) { usleep(1000); gcs_sm_leave (sm); } gu_cond_destroy (&cond); return NULL; } START_TEST (gcs_sm_test_simple) { int ret; gcs_sm_t* sm = gcs_sm_create(4, 1); ck_assert(sm != NULL); gu_cond_t cond; gu_cond_init (&cond, NULL); ret = gcs_sm_enter(sm, &cond, false, true); ck_assert_msg(0 == ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); ck_assert_msg(sm->entered == true, "entered = %ld, expected %d", sm->users, true); gu_thread_t t1, t2, t3, t4; gu_thread_create (&t1, NULL, simple_thread, sm); gu_thread_create (&t2, NULL, simple_thread, sm); gu_thread_create (&t3, NULL, simple_thread, sm); WAIT_FOR ((long)sm->wait_q_len == sm->users); ck_assert_msg((long)sm->wait_q_len == sm->users, "wait_q_len = %lu, users = %ld", sm->wait_q_len, sm->users); gu_thread_create (&t4, NULL, simple_thread, sm); mark_point(); gu_thread_join (t4, NULL); // there's no space in the queue ck_assert(simple_ret == -EAGAIN); ck_assert_msg(0 == sm->wait_q_tail, "wait_q_tail = %lu, expected 0", sm->wait_q_tail); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); ck_assert_msg(4 == sm->users, "users = %lu, expected 4", sm->users); gu_info ("Calling gcs_sm_leave()"); gcs_sm_leave(sm); ck_assert_msg(4 > sm->users, "users = %lu, expected 4", sm->users); gu_info ("Calling gcs_sm_close()"); ret = gcs_sm_close(sm); ck_assert(0 == ret); gu_thread_join(t1, NULL); gu_thread_join(t2, NULL); gu_thread_join(t3, NULL); 
gcs_sm_destroy(sm); gu_cond_destroy(&cond); } END_TEST static volatile int order = 0; // global variable to trac the order of events static void* closing_thread (void* data) { gcs_sm_t* sm = (gcs_sm_t*)data; ck_assert_msg(order == 0, "order is %d, expected 0", order); order = 1; int ret = gcs_sm_close(sm); ck_assert(0 == ret); ck_assert_msg(order == 2, "order is %d, expected 2", order); gcs_sm_destroy(sm); return NULL; } START_TEST (gcs_sm_test_close) { order = 0; gcs_sm_t* sm = gcs_sm_create(2, 1); ck_assert(sm != NULL); gu_cond_t cond; gu_cond_init (&cond, NULL); int ret = gcs_sm_enter(sm, &cond, false, true); ck_assert_msg(0 == ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); ck_assert(order == 0); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); ck_assert_msg(1 == sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); gu_thread_t thr; gu_thread_create (&thr, NULL, closing_thread, sm); WAIT_FOR(1 == order); ck_assert_msg(order == 1, "order is %d, expected 1", order); usleep(TEST_USLEEP); // make sure closing_thread() blocks in gcs_sm_close() ck_assert_msg(sm->users == 2, "users = %ld, expected 2", sm->users); gu_info ("Started close thread, users = %ld", sm->users); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); ck_assert_msg(0 == sm->wait_q_tail, "wait_q_tail = %lu, expected 0", sm->wait_q_tail); ck_assert(1 == sm->entered); order = 2; gcs_sm_leave(sm); mark_point(); gu_thread_join(thr, NULL); gu_cond_destroy(&cond); } END_TEST static volatile int pause_order = 0; static void* pausing_thread (void* data) { gu_info ("pausing_thread start, pause_order = %d", pause_order); gcs_sm_t* sm = (gcs_sm_t*)data; gu_cond_t cond; gu_cond_init (&cond, NULL); gcs_sm_schedule (sm); gu_info ("pausing_thread scheduled, pause_order = %d", pause_order); ck_assert_msg(pause_order == 0, "pause_order = %d, expected 
0", pause_order); pause_order = 1; gcs_sm_enter (sm, &cond, true, true); gu_info ("pausing_thread entered, pause_order = %d", pause_order); ck_assert_msg(pause_order == 2, "pause_order = %d, expected 2", pause_order); pause_order = 3; usleep(TEST_USLEEP); gcs_sm_leave (sm); mark_point(); gu_cond_destroy(&cond); gu_info ("pausing_thread exit, pause_order = %d", pause_order); return NULL; } static double const EPS = 1.0e-15; // double precision START_TEST (gcs_sm_test_pause) { int q_len; int q_len_max; int q_len_min; double q_len_avg; long long paused_ns; double paused_avg; gcs_sm_t* sm = gcs_sm_create(4, 1); ck_assert(sm != NULL); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); gu_cond_t cond; gu_cond_init (&cond, NULL); gu_thread_t thr; gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &paused_ns, &paused_avg); ck_assert_msg(paused_ns == 0, "paused_ns: expected 0, got %lld", paused_ns); ck_assert_msg(fabs(paused_avg) <= EPS, "paused_avg: expected <= %e, got %e", EPS, fabs(paused_avg)); ck_assert_msg(fabs(q_len_avg) <= EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); ck_assert(q_len == 0); ck_assert(q_len_max == 0); ck_assert(q_len_min == 0); // Test attempt to enter paused monitor pause_order = 0; gcs_sm_pause (sm); gu_thread_create (&thr, NULL, pausing_thread, sm); WAIT_FOR(1 == pause_order); ck_assert_msg(pause_order == 1, "pause_order = %d, expected 1", pause_order); usleep(TEST_USLEEP); // make sure pausing_thread blocked in gcs_sm_enter() pause_order = 2; // testing taking stats in the middle of the pause pt. 
1 gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &paused_ns, &paused_avg); ck_assert(paused_ns > 0.0); ck_assert(paused_avg > 0.0); ck_assert_msg(fabs(q_len_avg) <= EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gu_info ("Calling gcs_sm_continue()"); gcs_sm_continue (sm); gu_thread_join (thr, NULL); ck_assert_msg(pause_order == 3, "pause_order = %d, expected 3", pause_order); ck_assert_msg(2 == sm->wait_q_head, "wait_q_head = %lu, expected 2", sm->wait_q_head); ck_assert_msg(1 == sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); // testing taking stats in the middle of the pause pt. 2 long long tmp; gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); ck_assert(tmp >= paused_ns); paused_ns = tmp; ck_assert(paused_avg > 0.0); ck_assert_msg(fabs(q_len_avg) <= EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_stats_flush(sm); // Testing scheduling capability gcs_sm_schedule (sm); ck_assert_msg(2 == sm->wait_q_tail, "wait_q_tail = %lu, expected 2", sm->wait_q_tail); gu_thread_create (&thr, NULL, pausing_thread, sm); usleep (TEST_USLEEP); // no changes in pause_order ck_assert_msg(pause_order == 3, "pause_order = %d, expected 3",pause_order); pause_order = 0; int ret = gcs_sm_enter(sm, &cond, true, true); ck_assert_msg(0 == ret, "gcs_sm_enter() failed: %d (%s)", ret, strerror(-ret)); // released monitor lock, thr should continue and schedule, // set pause_order to 1 WAIT_FOR(1 == pause_order); ck_assert_msg(pause_order == 1, "pause_order = %d, expected 1", pause_order); ck_assert_msg(sm->users == 2, "users = %ld, expected 2", sm->users); ck_assert_msg(2 == sm->wait_q_head, "wait_q_head = %lu, expected 2", sm->wait_q_head); ck_assert_msg(3 == sm->wait_q_tail, "wait_q_tail = %lu, expected 3", sm->wait_q_tail); gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); ck_assert(tmp >= paused_ns); paused_ns = tmp; 
ck_assert_msg(fabs(paused_avg) <= EPS, "paused_avg: expected <= %e, got %e", EPS, fabs(paused_avg)); ck_assert_msg(q_len == sm->users, "found q_len %d, expected = %ld", q_len, sm->users); ck_assert_msg(q_len_max == q_len, "found q_len_max %d, expected = %d", q_len_max, q_len); ck_assert_msg(q_len_min == 0, "found q_len_min %d, expected = 0", q_len_min); ck_assert_msg(fabs(q_len_avg - 0.5) <= EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_stats_flush(sm); gu_info ("Started pause thread, users = %ld", sm->users); // Now test pausing when monitor is in entered state pause_order = 2; gcs_sm_pause (sm); usleep (TEST_USLEEP); gcs_sm_continue (sm); // nothing should continue, since monitor is entered usleep (TEST_USLEEP); ck_assert_msg(pause_order == 2, "pause_order = %d, expected 2", pause_order); ck_assert_msg(sm->entered == 1, "entered = %ld, expected 1", sm->entered); // Now test pausing when monitor is left gcs_sm_pause (sm); ck_assert_msg(sm->users == 2, "users = %ld, expected 2", sm->users); gcs_sm_leave (sm); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); ck_assert_msg(sm->entered == 0, "entered = %ld, expected 1", sm->entered); ck_assert_msg(3 == sm->wait_q_head, "wait_q_head = %lu, expected 3", sm->wait_q_head); ck_assert_msg(3 == sm->wait_q_tail, "wait_q_tail = %lu, expected 3", sm->wait_q_tail); usleep (TEST_USLEEP); // nothing should change, since monitor is paused ck_assert_msg(pause_order == 2, "pause_order = %d, expected 2", pause_order); ck_assert_msg(sm->entered == 0, "entered = %ld, expected 0", sm->entered); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); gcs_sm_continue (sm); // paused thread should continue WAIT_FOR(3 == pause_order); ck_assert_msg(pause_order == 3, "pause_order = %d, expected 3", pause_order); gcs_sm_stats_get (sm, &q_len, &q_len_max, &q_len_min, &q_len_avg, &tmp, &paused_avg); ck_assert(tmp > paused_ns); paused_ns = tmp; ck_assert(paused_avg > 0.0); 
ck_assert_msg(fabs(q_len_avg) <= EPS, "q_len_avg: expected <= %e, got %e", EPS, fabs(q_len_avg)); gcs_sm_enter (sm, &cond, false, true); // by now paused thread exited monitor ck_assert_msg(sm->entered == 1, "entered = %ld, expected 1", sm->entered); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); ck_assert_msg(0 == sm->wait_q_head, "wait_q_head = %lu, expected 0", sm->wait_q_head); ck_assert_msg(0 == sm->wait_q_tail, "wait_q_tail = %lu, expected 0", sm->wait_q_tail); gcs_sm_leave (sm); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); mark_point(); gu_cond_destroy(&cond); gcs_sm_close (sm); mark_point(); gu_thread_join(thr, NULL); gcs_sm_destroy (sm); } END_TEST static volatile long global_handle = 0; static volatile long global_ret = 0; static void* interrupt_thread(void* arg) { gcs_sm_t* sm = (gcs_sm_t*) arg; global_handle = gcs_sm_schedule (sm); if (global_handle >= 0) { pthread_cond_t cond; pthread_cond_init (&cond, NULL); if (0 == (global_ret = gcs_sm_enter (sm, &cond, true, true))) { gcs_sm_leave (sm); } pthread_cond_destroy (&cond); } return NULL; } #define TEST_CREATE_THREAD(thr, tail, h, u) \ global_handle = -1; \ gu_thread_create (thr, NULL, interrupt_thread, sm); \ WAIT_FOR(global_handle == h); \ ck_assert_msg(sm->wait_q_tail == tail, "wait_q_tail = %lu, expected %lu", \ sm->wait_q_tail, static_cast(tail)); \ ck_assert_msg(global_handle == h, "global_handle = %ld, expected %ld", \ global_handle, static_cast(h)); \ ck_assert_msg(sm->users == u, "users = %ld, expected %ld", \ sm->users, static_cast(u)); #define TEST_INTERRUPT_THREAD(h, t) \ ret = gcs_sm_interrupt (sm, (h)); \ ck_assert(ret == 0); \ gu_thread_join ((t), NULL); \ ck_assert_msg(global_ret == -EINTR, "global_ret = %ld, " \ "expected %d (-EINTR)", global_ret, -EINTR); START_TEST (gcs_sm_test_interrupt) { gcs_sm_t* sm = gcs_sm_create(4, 1); ck_assert(sm != NULL); gu_cond_t cond; gu_cond_init (&cond, NULL); gu_thread_t thr1; gu_thread_t 
thr2; gu_thread_t thr3; long handle = gcs_sm_schedule (sm); ck_assert_msg(handle == 0, "handle = %ld, expected 0", handle); ck_assert_msg(sm->wait_q_tail == 1, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); long ret = gcs_sm_enter (sm, &cond, true, true); ck_assert(ret == 0); /* 1. Test interrupting blocked by previous thread */ TEST_CREATE_THREAD(&thr1, 2, 3, 2); TEST_CREATE_THREAD(&thr2, 3, 4, 3); TEST_INTERRUPT_THREAD(3, thr1); gcs_sm_leave (sm); // this should let 2nd enter monitor gu_thread_join (thr2, NULL); ck_assert_msg(global_ret == 0, "global_ret = %ld, expected 0", global_ret); ck_assert_msg(sm->users == 0, "users = %ld, expected 0", sm->users); ret = gcs_sm_interrupt (sm, 4); // try to interrupt 2nd which has exited ck_assert(ret == -ESRCH); /* 2. Test interrupting blocked by pause */ gcs_sm_pause (sm); TEST_CREATE_THREAD(&thr1, 0, 1, 1); TEST_INTERRUPT_THREAD(1, thr1); TEST_CREATE_THREAD(&thr2, 1, 2, 1); /* test queueing after interrupted */ TEST_CREATE_THREAD(&thr3, 2, 3, 2); TEST_INTERRUPT_THREAD(3, thr3); /* test interrupting last waiter */ gcs_sm_continue (sm); gu_thread_join (thr2, NULL); ck_assert_msg(global_ret == 0, "global_ret = %ld, expected 0", global_ret); /* 3. 
Unpausing totally interrupted monitor */ gcs_sm_pause (sm); TEST_CREATE_THREAD(&thr1, 3, 4, 1); TEST_INTERRUPT_THREAD(4, thr1); TEST_CREATE_THREAD(&thr1, 0, 1, 1); TEST_INTERRUPT_THREAD(1, thr1); gcs_sm_continue (sm); /* check that monitor is still functional */ ret = gcs_sm_enter (sm, &cond, false, true); ck_assert(ret == 0); ck_assert_msg(1 == sm->wait_q_head, "wait_q_head = %lu, expected 1", sm->wait_q_head); ck_assert_msg(1 == sm->wait_q_tail, "wait_q_tail = %lu, expected 1", sm->wait_q_tail); ck_assert_msg(sm->users == 1, "users = %ld, expected 1", sm->users); TEST_CREATE_THREAD(&thr1, 2, 3, 2); gu_info ("Calling gcs_sm_leave()"); gcs_sm_leave (sm); pthread_join (thr1, NULL); ck_assert_msg(global_ret == 0, "global_ret = %ld, expected 0", global_ret); pthread_cond_destroy (&cond); gcs_sm_close (sm); gcs_sm_destroy (sm); } END_TEST Suite *gcs_send_monitor_suite(void) { Suite *s = suite_create("GCS send monitor"); TCase *tc = tcase_create("gcs_sm"); suite_add_tcase (s, tc); tcase_add_test (tc, gcs_sm_test_basic); tcase_add_test (tc, gcs_sm_test_simple); tcase_add_test (tc, gcs_sm_test_close); tcase_add_test (tc, gcs_sm_test_pause); tcase_add_test (tc, gcs_sm_test_interrupt); return s; } galera-4-26.4.25/gcs/src/unit_tests/gcs_fifo_test.cpp000644 000164 177776 00000006134 15107057155 023606 0ustar00jenkinsnogroup000000 000000 // Copyright (C) 2007-2020 Codership Oy // $Id$ #include "../gcs_fifo_lite.hpp" #include "gcs_fifo_test.hpp" // must be included last #define FIFO_LENGTH 10 START_TEST (gcs_fifo_lite_test) { gcs_fifo_lite_t* fifo; long ret; long i; long* item; fifo = gcs_fifo_lite_create (0, 1); ck_assert(fifo == NULL); fifo = gcs_fifo_lite_create (1, 0); ck_assert(fifo == NULL); fifo = gcs_fifo_lite_create (1, 1); ck_assert(fifo != NULL); ret = gcs_fifo_lite_destroy (fifo); ck_assert_msg(ret == 0, "gcs_fifo_lite_destroy() returned %ld", ret); fifo = gcs_fifo_lite_create (FIFO_LENGTH, sizeof(i)); ck_assert(fifo != NULL); ck_assert_msg(fifo->used == 0, 
"fifo->used is %ld for an empty FIFO", fifo->used); gcs_fifo_lite_open (fifo); // fill FIFO for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_tail (fifo); ck_assert_msg(NULL != item, "gcs_fifo_lite_get_tail() returned NULL"); *item = i; gcs_fifo_lite_push_tail (fifo); } ck_assert_msg(fifo->used == FIFO_LENGTH, "fifo->used is %ld, expected %d", fifo->used, FIFO_LENGTH); // test remove for (i = 1; i <= FIFO_LENGTH; i++) { ret = gcs_fifo_lite_remove (fifo); ck_assert_msg(0 != ret, "gcs_fifo_lite_remove() failed, i = %ld", i); } ck_assert_msg(fifo->used == 0, "fifo->used is %ld, expected %d", fifo->used, 0); // try remove on empty queue ret = gcs_fifo_lite_remove (fifo); ck_assert_msg(0 == ret, "gcs_fifo_lite_remove() from empty FIFO returned true"); // it should be possible to fill FIFO again for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_tail (fifo); ck_assert_msg(NULL != item, "gcs_fifo_lite_get_tail() returned NULL"); *item = i; gcs_fifo_lite_push_tail (fifo); } ck_assert_msg(fifo->used == FIFO_LENGTH, "fifo->used is %ld, expected %d", fifo->used, FIFO_LENGTH); // test get for (i = 1; i <= FIFO_LENGTH; i++) { item = (long*)gcs_fifo_lite_get_head (fifo); ck_assert_msg(NULL != item, "gcs_fifo_lite_get_head() returned NULL"); ck_assert_msg(*item == i, "gcs_fifo_lite_get_head() returned %ld, " "expected %ld", *item, i); gcs_fifo_lite_release (fifo); item = (long*)gcs_fifo_lite_get_head (fifo); ck_assert_msg(NULL != item, "gcs_fifo_lite_get_head() returned NULL"); ck_assert_msg(*item == i, "gcs_fifo_lite_get_head() returned %ld, " "expected %ld", *item, i); gcs_fifo_lite_pop_head (fifo); } ck_assert_msg(fifo->used == 0, "fifo->used for empty queue is %ld", fifo->used); ret = gcs_fifo_lite_destroy (fifo); ck_assert_msg(ret == 0, "gcs_fifo_lite_destroy() failed: %ld", ret); } END_TEST Suite *gcs_fifo_suite(void) { Suite *s = suite_create("GCS FIFO functions"); TCase *tc = tcase_create("gcs_fifo"); suite_add_tcase (s, tc); 
tcase_add_test (tc, gcs_fifo_lite_test); return s; } galera-4-26.4.25/gcs/src/unit_tests/gcs_defrag_test.hpp000644 000164 177776 00000000344 15107057155 024115 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_defrag_test__ #define __gcs_defrag_test__ #include extern Suite *gcs_defrag_suite(void); #endif /* __gu_defrag_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_memb_test.hpp000644 000164 177776 00000000335 15107057155 023605 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011-2015 Codership Oy * * $Id$ */ #ifndef __gcs_memb_test__ #define __gcs_memb_test__ #include extern Suite *gcs_memb_suite(void); #endif /* __gu_group_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_core_test.cpp000644 000164 177776 00000112067 15107057155 023616 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2024 Codership Oy * * $Id$ */ /* * @file * * Defines unit tests for gcs_core (and as a result tests gcs_group and * a dummy backend which gcs_core depends on) * * Most of the checks require independent sending and receiving threads. * Approach 1 is to start separate threads for both sending and receiving * and use the current thread of execution to sychronize between them: * * CORE_RECV_START(act_r) * CORE_SEND_START(act_s) * while (gcs_core_send_step(Core)) { // step through action fragments * (do something) * }; * CORE_SEND_END(act_s, ret) // check return code * CORE_RECV_END(act_r, size, type) // makes checks against size and type * * A simplified approach 2 is: * * CORE_SEND_START(act_s) * while (gcs_core_send_step(Core)) { // step through action fragments * (do something) * }; * CORE_SEND_END(act_s, ret) // check return code * CORE_RECV_ACT(act_r, size, type) // makes checks agains size and type * * In the first approach group messages will be received concurrently. 
* In the second approach messages will wait in queue and be fetched afterwards * */ #include // PRId64 #define GCS_STATE_MSG_ACCESS #include "../gcs_core.hpp" #include "../gcs_dummy.hpp" #include "../gcs_seqno.hpp" #include "../gcs_state_msg.hpp" #include "../gcs_code_msg.hpp" #include #include "gu_config.hpp" #include "gcs_test_utils.hpp" #include "gcs_core_test.hpp" // must be included last START_TEST(gcs_code_msg) { gu::UUID const u0(NULL, 0); gcs_seqno_t const s0(1234); uint64_t const c0(4312); gcs::core::CodeMsg cm0(gu::GTID(u0, s0), c0); const void* const buf(cm0()); const gcs::core::CodeMsg* const cm1 (static_cast(buf)); gu::UUID const u1(cm1->uuid()); gcs_seqno_t const s1(cm1->seqno()); uint64_t const c1(cm1->code()); ck_assert(u0 == u1); ck_assert(s0 == s1); ck_assert(c0 == c1); } END_TEST extern ssize_t gcs_tests_get_allocated(); static const long UNKNOWN_SIZE = 1234567890; // some unrealistic number static std::string const CacheName("core_test.cache"); static gcache::GCache* Cache = NULL; static gcs_core_t* Core = NULL; static gcs_backend_t* Backend = NULL; static gcs_seqno_t Seqno = 0; static gu::UUID Uuid; typedef struct action { const struct gu_buf* in; void* out; const void* local; ssize_t size; gcs_act_type_t type; gcs_seqno_t seqno; gu_thread_t thread; action() { } action(const struct gu_buf* a_in, void* a_out, const void* a_local, ssize_t a_size, gcs_act_type_t a_type, gcs_seqno_t a_seqno, gu_thread_t a_thread) : in (a_in), out (a_out), local (a_local), size (a_size), type (a_type), seqno (a_seqno), thread (a_thread) { } } action_t; //static struct action_t RecvAct; static const ssize_t FRAG_SIZE = 4; // desirable action fragment size // 1-fragment action static const char act1_str[] = "101"; static const struct gu_buf act1[1] = { { act1_str, sizeof(act1_str) } }; // 2-fragment action, with buffers aligned with FRAG_SIZE static const char act2_str[] = "202122"; static const struct gu_buf act2[2] = { { "2021", 4 }, { "22", 3 } /* 4 + 3 = 7 = 
sizeof(act2_str) */ }; // 3-fragment action, with unaligned buffers static const char act3_str[] = "3031323334"; static const struct gu_buf act3[] = { { "303", 3 }, { "13", 2 }, { "23", 2 }, { "334", 4 } /* 3 + 2 + 2 + 4 = 11 = sizeof(act3_str) */ }; // action receive thread, returns after first action received, stores action // in the passed action_t object, uses global Core to receive static void* core_recv_thread (void* arg) { action_t* act = (action_t*)arg; // @todo: refactor according to new gcs_act types struct gcs_act_rcvd recv_act; act->size = gcs_core_recv (Core, &recv_act, GU_TIME_ETERNITY); act->out = (void*)recv_act.act.buf; act->local = recv_act.local; act->type = recv_act.act.type; act->seqno = recv_act.id; return (NULL); } // this macro logs errors from within a function #define FAIL_IF(expr, format, ...) \ if (expr) { \ gu_fatal ("FAIL: " format, __VA_ARGS__); \ ck_assert_msg(false, format, __VA_ARGS__); \ return true; \ } // Start a thread to receive an action // args: action_t object static inline bool CORE_RECV_START(action_t* act) { return (0 != gu_thread_create (&act->thread, NULL, core_recv_thread, act)); } static bool COMMON_RECV_CHECKS(action_t* act, const char* buf, int size, gcs_act_type_t type, gcs_seqno_t* seqno) { FAIL_IF (size != UNKNOWN_SIZE && size != act->size, "gcs_core_recv(): expected size %d, returned %zd (%s)", size, act->size, strerror (-act->size)); FAIL_IF (act->type != type, "type does not match: expected %d, got %d", type, act->type); FAIL_IF (act->size > 0 && act->out == NULL, "null buffer received with positive size: %zu", act->size); if (act->type == GCS_ACT_STATE_REQ) return false; // action is ordered only if it is of type GCS_ACT_WRITESET or // GCS_ACT_CCHANGE and not an error if (act->seqno > GCS_SEQNO_ILL) { FAIL_IF (GCS_ACT_WRITESET != act->type && GCS_ACT_CCHANGE != act->type && act->seqno > 0, "GCS_ACT_WRITESET != act->type (%d), while act->seqno: %lld", act->type, (long long)act->seqno); if (GCS_ACT_WRITESET == 
act->type) { assert((*seqno + 1) == act->seqno); FAIL_IF ((*seqno + 1) != act->seqno, "expected seqno %lld, got %lld", (long long)(*seqno + 1), (long long)act->seqno); *seqno = *seqno + 1; } else if (GCS_ACT_CCHANGE == act->type) { FAIL_IF (act->seqno < 0, "Negative seqno: %lld", (long long)act->seqno); Uuid = gcs_core_get_group(Core)->group_uuid; if (gcs_core_proto_ver(Core) >= 1) *seqno = *seqno + 1; } } if (NULL != buf) { if (GCS_ACT_WRITESET == act->type) { // local action buffer should not be copied ck_assert_msg(act->local == act->in, "Received buffer ptr is not the same as sent: " "%p != %p", act->in, act->local); ck_assert_msg(!memcmp(buf, act->out, act->size), "Received buffer contents is not the same as sent: " "'%s' != '%s'", buf, (char*)act->out); } else { ck_assert_msg(act->local != buf, "Received the same buffer ptr as sent"); ck_assert_msg(!memcmp(buf, act->out, act->size), "Received buffer contents is not the same as sent"); } } return false; } // Wait for recv thread to complete, perform required checks // args: action_t, expected size, expected type static bool CORE_RECV_END(action_t* act, const void* buf, ssize_t size, gcs_act_type_t type) { { int ret = gu_thread_join (act->thread, NULL); act->thread = (gu_thread_t)-1; FAIL_IF(0 != ret, "Failed to join recv thread: %d (%s)", ret, strerror (ret)); } return COMMON_RECV_CHECKS (act, (const char*)buf, size, type, &Seqno); } // Receive action in one call, perform required checks // args: pointer to action_t, expected size, expected type static bool CORE_RECV_ACT (action_t* act, const void* buf, // single buffer action repres. 
ssize_t size, gcs_act_type_t type) { struct gcs_act_rcvd recv_act; act->size = gcs_core_recv (Core, &recv_act, GU_TIME_ETERNITY); act->out = (void*)recv_act.act.buf; act->local = recv_act.local; act->type = recv_act.act.type; act->seqno = recv_act.id; return COMMON_RECV_CHECKS (act, (const char*)buf, size, type, &Seqno); } // Sending always needs to be done via separate thread (uses lock-stepping) void* core_send_thread (void* arg) { action_t* act = (action_t*)arg; // use seqno field to pass the return code, it is signed 8-byte integer act->seqno = gcs_core_send (Core, act->in, act->size, act->type); return (NULL); } // Start a thread to send an action // args: action_t object static bool CORE_SEND_START(action_t* act) { return (0 != gu_thread_create (&act->thread, NULL, core_send_thread, act)); } // Wait for send thread to complete, perform required checks // args: action_t, expected return code static bool CORE_SEND_END(action_t* act, long ret) { { long _ret = gu_thread_join (act->thread, NULL); act->thread = (gu_thread_t)-1; ck_assert_msg(0 == _ret, "Failed to join recv thread: %ld (%s)", _ret, strerror (_ret)); } ck_assert_msg(ret == act->seqno, "gcs_core_send(): expected %lld, returned %lld (%s)", (long long) ret, (long long) act->seqno, strerror (-act->seqno)); return false; } // check if configuration is the one that we expected static long core_test_check_conf (const void* const conf_msg, int const conf_size, bool const prim, long const my_idx, size_t const memb_num) { long ret = 0; gcs_act_cchange const conf(conf_msg, conf_size); if ((conf.conf_id >= 0) != prim) { gu_error ("Expected %s conf, received %s", prim ? "PRIMARY" : "NON-PRIMARY", (conf.conf_id >= 0) ? 
"PRIMARY" : "NON-PRIMARY"); ret = -1; } if (conf.memb.size() != memb_num) { gu_error ("Expected memb_num = %zd, got %zd", memb_num,conf.memb.size()); ret = -1; } return ret; } static long core_test_set_payload_size (ssize_t s) { long ret; const ssize_t arbitrary_pkt_size = s + 64; // big enough for payload to fit ret = gcs_core_set_pkt_size (Core, arbitrary_pkt_size); if (ret <= 0) { gu_error("set_pkt_size(%zd) returned: %ld (%s)", arbitrary_pkt_size, ret, strerror (-ret)); return ret; } ret = gcs_core_set_pkt_size (Core, arbitrary_pkt_size - ret + s); if (ret != s) { gu_error("set_pkt_size() returned: %ld instead of %zd", ret, s); return ret; } return 0; } // Initialises core and backend objects + some common tests static inline void core_test_init (gu::Config* config, bool bootstrap = true, int const gcs_proto_ver = 1) { long ret; action_t act; mark_point(); ck_assert(config != NULL); gcs_test::InitConfig(*config, CacheName); Cache = new gcache::GCache(NULL, *config, "."); Core = gcs_core_create (*config, reinterpret_cast(Cache), "core_test", "aaa.bbb.ccc.ddd:xxxx", 0, 0, gcs_proto_ver); ck_assert(NULL != Core); Backend = gcs_core_get_backend (Core); ck_assert(NULL != Backend); Seqno = 0; // reset seqno ret = core_test_set_payload_size (FRAG_SIZE); ck_assert_msg(-EBADFD == ret, "Expected -EBADFD, got: %ld (%s)", ret, strerror(-ret)); ret = gcs_core_open (Core, "yadda-yadda", "owkmevc", 1); ck_assert_msg(-EINVAL == ret, "Expected -EINVAL, got %ld (%s)", ret, strerror(-ret)); ret = gcs_core_open (Core, "yadda-yadda", "dummy://", bootstrap); ck_assert_msg(0 == ret, "Failed to open core connection: %ld (%s)", ret, strerror(-ret)); if (!bootstrap) { gcs_core_send_lock_step (Core, true); mark_point(); return; } // receive first configuration message ck_assert(!CORE_RECV_ACT (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act.out, act.size, bootstrap, 0, 1)); Cache->free(act.out); int const ver(gcs_core_proto_ver(Core)); ck_assert_msg(ver 
== gcs_proto_ver,"Expected protocol version: %d, got: %d", gcs_proto_ver, ver); // this will configure backend to have desired fragment size ret = core_test_set_payload_size (FRAG_SIZE); ck_assert_msg(0 == ret, "Failed to set up the message payload size: %ld (%s)", ret, strerror(-ret)); // try to send an action to check that everything's alright ret = gcs_core_send (Core, act1, sizeof(act1_str), GCS_ACT_WRITESET); ck_assert_msg(ret == sizeof(act1_str), "Expected %zu, got %ld (%s)", sizeof(act1_str), ret, strerror (-ret)); gu_warn ("Next CORE_RECV_ACT fails under valgrind"); act.in = act1; ck_assert(!CORE_RECV_ACT(&act, act1_str, sizeof(act1_str), GCS_ACT_WRITESET)); ret = gcs_core_send_join (Core, gu::GTID(Uuid, Seqno), 0); ck_assert_msg(ret >= 0, "gcs_core_send_join(): %ld (%s)", ret, strerror(-ret)); // no action to be received (we're joined already) ret = gcs_core_send_sync (Core, gu::GTID(Uuid, Seqno)); int const proto(gcs_core_proto_ver(Core)); ck_assert(proto == gcs_proto_ver); // checking just in case int const expected_ret (proto >= 1 ? gcs::core::CodeMsg::serial_size() : sizeof(gcs_seqno_t)); ck_assert_msg(ret == expected_ret, "gcs_core_send_sync(): %ld (%s)", ret, strerror(-ret)); ck_assert(!CORE_RECV_ACT(&act, NULL, sizeof(gcs_seqno_t), GCS_ACT_SYNC)); gcs_seqno_t const s(gcs_seqno_gtoh(*(gcs_seqno_t*)act.out)); int const expected_s(proto >= 1 ? 
0 : Seqno); ck_assert_msg(s == expected_s, "Expected code %lld, got %lld", (long long)expected_s, (long long)s); gcs_core_send_lock_step (Core, true); mark_point(); } // cleans up core and backend objects static inline void core_test_cleanup () { long ret; char tmp[1]; action_t act; ck_assert(NULL != Core); ck_assert(NULL != Backend); // to fetch self-leave message ck_assert(!CORE_RECV_START (&act)); ret = gcs_core_close (Core); ck_assert_msg(0 == ret, "Failed to close core: %ld (%s)", ret, strerror (-ret)); ret = CORE_RECV_END (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE); ck_assert_msg(0 == ret, "ret: %ld (%s)", ret, strerror(-ret)); Cache->free(act.out); // check that backend is closed too ret = Backend->send (Backend, tmp, sizeof(tmp), GCS_MSG_ACTION); ck_assert(ret == -EBADFD); ret = gcs_core_destroy (Core); ck_assert_msg(0 == ret, "Failed to destroy core: %ld (%s)", ret, strerror (-ret)); { ssize_t allocated; allocated = gcs_tests_get_allocated(); ck_assert_msg(0 == allocated, "Expected 0 allocated bytes, found %zd", allocated); } delete Cache; ::unlink(CacheName.c_str()); } // just a smoke test for core API START_TEST (gcs_core_test_api) { gu::Config config; core_test_init (&config); ck_assert(NULL != Cache); ck_assert(NULL != Core); ck_assert(NULL != Backend); long ret; long tout = 100; // 100 ms timeout const struct gu_buf* act = act3; const void* act_buf = act3_str; size_t act_size = sizeof(act3_str); action_t act_s(act, NULL, NULL, act_size, GCS_ACT_WRITESET, -1, (gu_thread_t)-1); action_t act_r(act, NULL, NULL, -1, (gcs_act_type_t)GCS_ACT_UNKNOWN, -1, (gu_thread_t)-1); long i = 5; // test basic fragmentaiton while (i--) { long frags = (act_size - 1)/FRAG_SIZE + 1; gu_info ("Iteration %ld: act: %p, size: %zu, frags: %ld", i, act, act_size, frags); ck_assert(!CORE_SEND_START (&act_s)); while ((ret = gcs_core_send_step (Core, 3*tout)) > 0) { frags--; gu_info ("frags: %ld", frags); // usleep (1000); } ck_assert_msg(ret == 0, "gcs_core_send_step() returned: 
%ld (%s)", ret, strerror(-ret)); ck_assert_msg(frags == 0, "frags = %ld, instead of 0", frags); ck_assert(!CORE_SEND_END (&act_s, act_size)); ck_assert(!CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_WRITESET)); ret = gcs_core_set_last_applied (Core, gu::GTID(Uuid, Seqno)); ck_assert_msg(ret >= 0, "gcs_core_set_last_applied(): %ld (%s)", ret, strerror(-ret)); /* commit cut action size should be 8 */ ck_assert(!CORE_RECV_ACT (&act_r, NULL, 8, GCS_ACT_COMMIT_CUT)); ck_assert(Seqno == gcs_seqno_gtoh(*(gcs_seqno_t*)act_r.out)); free(act_r.out); // commit cut is allocated by malloc() } // send fake flow control action, its contents is not important gcs_core_send_fc (Core, act, act_size); ck_assert_msg(ret >= 0, "gcs_core_send_fc(): %ld (%s)", ret, strerror(-ret)); ck_assert(!CORE_RECV_ACT(&act_r, act, act_size, GCS_ACT_FLOW)); core_test_cleanup (); } END_TEST // do a single send step, compare with the expected result static inline bool CORE_SEND_STEP (gcs_core_t* core, long timeout, long ret, int line) { long err = gcs_core_send_step (core, timeout); ck_assert_msg(err >= 0, "gcs_core_send_step(): %ld (%s)", err, strerror (-err)); if (ret >= 0) { if (err != ret) { fprintf(stderr, "gcs_core_send_step(%ld, %ld) at line %d:" " expected %ld, got %ld", timeout, ret, line, ret, err); assert(0); // to catch a core if possible ck_abort(); } } return false; } static bool DUMMY_INJECT_COMPONENT (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { long ret = gcs_dummy_inject_msg (Backend, comp, gcs_comp_msg_size(comp), GCS_MSG_COMPONENT, GCS_SENDER_NONE); ck_assert_msg(ret > 0, "gcs_dummy_inject_msg(): %ld (%s)", ret, strerror(ret)); return false; } static bool DUMMY_INSTALL_COMPONENT (gcs_backend_t* backend, const gcs_comp_msg_t* comp) { bool primary = gcs_comp_msg_primary (comp); long my_idx = gcs_comp_msg_self (comp); long members = gcs_comp_msg_num (comp); action_t act; FAIL_IF (gcs_dummy_set_component(Backend, comp), "%s", "gcs_dummy_set_component"); FAIL_IF 
(DUMMY_INJECT_COMPONENT (Backend, comp), "%s", "DUMMT_INJECT_COMPONENT"); FAIL_IF (CORE_RECV_ACT (&act, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE), "%s", "CORE_RECV_ACT"); FAIL_IF (core_test_check_conf(act.out, act.size, primary, my_idx, members), "%s", "core_test_check_conf"); Cache->free(act.out); return false; } static void CORE_TEST_OWN (int gcs_proto_ver) { long const tout = 1000; // 100 ms timeout const struct gu_buf* act = act2; const void* act_buf = act2_str; size_t act_size = sizeof(act2_str); action_t act_s(act, NULL, NULL, act_size, GCS_ACT_WRITESET, -1, (gu_thread_t)-1); action_t act_r(act, NULL, NULL, -1, (gcs_act_type_t)GCS_ACT_UNKNOWN, -1, (gu_thread_t)-1); // Create primary and non-primary component messages gcs_comp_msg_t* prim = gcs_comp_msg_new (true, false, 0, 1, 0); gcs_comp_msg_t* non_prim = gcs_comp_msg_new (false, false, 0, 1, 0); ck_assert(NULL != prim); ck_assert(NULL != non_prim); gcs_comp_msg_add (prim, "node1", 0); gcs_comp_msg_add (non_prim, "node1", 1); gu::Config config; core_test_init (&config, true, gcs_proto_ver); ///////////////////////////////////////////// /// check behaviour in transitional state /// ///////////////////////////////////////////// ck_assert(!CORE_RECV_START (&act_r)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag usleep (10000); // resolve race between sending and setting transitional gcs_dummy_set_transitional (Backend); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag ck_assert(!CORE_SEND_STEP (Core, tout, 0, __LINE__)); // no frags left ck_assert(NULL == act_r.out); // should not have received anything ck_assert(!gcs_dummy_set_component (Backend, prim)); // return to PRIM state ck_assert(!CORE_SEND_END (&act_s, act_size)); ck_assert(!CORE_RECV_END (&act_r, act_buf, act_size, GCS_ACT_WRITESET)); /* * TEST CASE 1: Action was sent successfully, but NON_PRIM component * happened before any fragment could be delivered. 
* EXPECTED OUTCOME: action is received with -ENOTCONN instead of global * seqno */ ck_assert(!DUMMY_INJECT_COMPONENT (Backend, non_prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag ck_assert(!CORE_SEND_END (&act_s, act_size)); ck_assert(!gcs_dummy_set_component(Backend, non_prim)); ck_assert(!CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act_r.out, act_r.size, false, 0, 1)); Cache->free(act_r.out); ck_assert(!CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_WRITESET)); ck_assert_msg(-ENOTCONN == act_r.seqno, "Expected -ENOTCONN, received %" PRId64 " (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 2: core in NON_PRIM state. There is attempt to send an * action. * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 1st * fragment send fails. */ ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!CORE_SEND_STEP (Core, tout, 0, __LINE__)); // bail out after 1st frag ck_assert(!CORE_SEND_END (&act_s, -ENOTCONN)); /* * TEST CASE 3: Backend in NON_PRIM state. There is attempt to send an * action. * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 1st * fragment send fails. */ ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!gcs_dummy_set_component(Backend, non_prim)); ck_assert(!DUMMY_INJECT_COMPONENT (Backend, non_prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!CORE_SEND_END (&act_s, -ENOTCONN)); ck_assert(!CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act_r.out, act_r.size, false, 0, 1)); Cache->free(act_r.out); /* * TEST CASE 4: Action was sent successfully, but NON_PRIM component * happened in between delivered fragments. 
* EXPECTED OUTCOME: action is received with -ENOTCONN instead of global * seqno. */ ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!DUMMY_INJECT_COMPONENT (Backend, non_prim)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag ck_assert(!CORE_SEND_END (&act_s, act_size)); ck_assert(!CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act_r.out, act_r.size, false, 0, 1)); Cache->free(act_r.out); ck_assert(!CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_WRITESET)); ck_assert_msg(-ENOTCONN == act_r.seqno, "Expected -ENOTCONN, received %" PRId64 " (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 5: Action is being sent and received concurrently. In between * two fragments recv thread receives NON_PRIM and then PRIM components. * EXPECTED OUTCOME: CORE_RECV_ACT should receive the action with -ERESTART * instead of seqno. */ ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag usleep (100000); // make sure 1st fragment gets in before new component ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, non_prim)); ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag ck_assert(!CORE_SEND_END (&act_s, act_size)); ck_assert(!CORE_RECV_ACT (&act_r, act_buf, act_size, GCS_ACT_WRITESET)); ck_assert_msg(-ERESTART == act_r.seqno, "Expected -ERESTART, received %" PRId64 " (%s)", act_r.seqno, strerror (-act_r.seqno)); /* * TEST CASE 6: Action has 3 fragments, 2 were sent successfully but the * 3rd failed because backend is in NON_PRIM. In addition NON_PRIM component * happened in between delivered fragments. 
* subcase 1: new component received first * subcase 2: 3rd fragment is sent first * EXPECTED OUTCOME: CORE_SEND_END should return -ENOTCONN after 3rd * fragment send fails. */ act = act3; act_buf = act3_str; act_size = sizeof(act3_str); act_s.in = act; act_s.size = act_size; // subcase 1 ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!DUMMY_INJECT_COMPONENT (Backend, non_prim)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag usleep (500000); // fail_if_seq ck_assert(!gcs_dummy_set_component(Backend, non_prim)); ck_assert(!CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act_r.out, act_r.size, false, 0, 1)); Cache->free(act_r.out); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 3rd frag ck_assert(!CORE_SEND_END (&act_s, -ENOTCONN)); // subcase 2 ck_assert(!DUMMY_INSTALL_COMPONENT (Backend, prim)); ck_assert(!CORE_SEND_START (&act_s)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 1st frag ck_assert(!DUMMY_INJECT_COMPONENT (Backend, non_prim)); ck_assert(!CORE_SEND_STEP (Core, tout, 1, __LINE__)); // 2nd frag usleep (1000000); ck_assert(!gcs_dummy_set_component(Backend, non_prim)); ck_assert(!CORE_SEND_STEP (Core, 4*tout, 1, __LINE__)); // 3rd frag ck_assert(!CORE_RECV_ACT (&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf(act_r.out, act_r.size, false, 0, 1)); Cache->free(act_r.out); ck_assert(!CORE_SEND_END (&act_s, -ENOTCONN)); gu_free (prim); gu_free (non_prim); core_test_cleanup (); } START_TEST (gcs_core_test_own_v0) { CORE_TEST_OWN(0); } END_TEST START_TEST (gcs_core_test_own_v1) { CORE_TEST_OWN(1); } END_TEST #ifdef GCS_ALLOW_GH74 /* * Disabled test because it is too slow and timeouts on crowded * build systems like e.g. 
build.opensuse.org */ START_TEST (gcs_core_test_gh74) { gu::Config config; core_test_init(&config, true, "node1"); // set frag size large enough to avoid fragmentation. gu_info ("set payload size = 1024"); core_test_set_payload_size(1024); // new primary comp message. gcs_comp_msg_t* prim = gcs_comp_msg_new (true, false, 0, 2, 0); ck_assert(NULL != prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); // construct state transform request. static const char* req_ptr = "12345"; static const size_t req_size = 6; static const char* donor = ""; // from *any* static const size_t donor_len = strlen(donor) + 1; size_t act_size = req_size + donor_len; char* act_ptr = 0; act_ptr = (char*)gu_malloc(act_size); memcpy(act_ptr, donor, donor_len); memcpy(act_ptr + donor_len, req_ptr, req_size); // serialize request into message. gcs_act_frag_t frg; frg.proto_ver = gcs_core_group_protocol_version(Core); frg.frag_no = 0; frg.act_id = 1; frg.act_size = act_size; frg.act_type = GCS_ACT_STATE_REQ; char msg_buf[1024]; ck_assert(!gcs_act_proto_write(&frg, msg_buf, sizeof(msg_buf))); memcpy(const_cast(frg.frag), act_ptr, act_size); size_t msg_size = act_size + gcs_act_proto_hdr_size(frg.proto_ver); // gu_free(act_ptr); // state exchange message. gu_uuid_t state_uuid; gu_uuid_generate(&state_uuid, NULL, 0); gcs_core_set_state_uuid(Core, &state_uuid); // construct uuid message from node1. 
size_t uuid_len = sizeof(state_uuid); char uuid_buf[uuid_len]; memcpy(uuid_buf, &state_uuid, uuid_len); gcs_state_msg_t* state_msg = NULL; const gcs_group_t* group = gcs_core_get_group(Core); // state exchange message from node1 state_msg = gcs_group_get_state(group); state_msg->state_uuid = state_uuid; size_t state_len = gcs_state_msg_len (state_msg); char state_buf[state_len]; gcs_state_msg_write (state_buf, state_msg); gcs_state_msg_destroy (state_msg); // state exchange message from node2 state_msg = gcs_state_msg_create(&state_uuid, &GU_UUID_NIL, &GU_UUID_NIL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, 0, GCS_NODE_STATE_NON_PRIM, GCS_NODE_STATE_PRIM, "node2", "127.0.0.1", group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, group->prim_gcs_ver, group->prim_repl_ver, group->prim_appl_ver, 0, // desync count 0); size_t state_len2 = gcs_state_msg_len (state_msg); char state_buf2[state_len2]; gcs_state_msg_write (state_buf2, state_msg); gcs_state_msg_destroy (state_msg); action_t act_r(NULL, NULL, NULL, -1, (gcs_act_type_t)-1, -1, (gu_thread_t)-1); // ========== from node1's view ========== ck_assert(!gcs_dummy_set_component(Backend, prim)); ck_assert(!DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); CORE_RECV_START(&act_r); // we have to start another thread here. // otherwise messages to node1 can not be in right order. for(;;) { usleep(10000); // make sure node1 already changed its status to WAIT_STATE_MSG if (gcs_group_state(group) == GCS_GROUP_WAIT_STATE_MSG) { break; } } // then STR sneaks before new configuration is delivered. ck_assert(gcs_dummy_inject_msg(Backend, msg_buf, msg_size, GCS_MSG_ACTION, 1) == (int)msg_size); // then state exchange message from node2. ck_assert(gcs_dummy_inject_msg(Backend, state_buf2, state_len2, GCS_MSG_STATE_MSG, 1) == (int)state_len2); // expect STR is lost here. 
ck_assert(!CORE_RECV_END(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf((const gcs_act_cchange_t*)act_r.out, true, 0, 2)); free(act_r.out); core_test_cleanup(); // ========== from node2's view ========== core_test_init(&config, false, "node2"); // set frag size large enough to avoid fragmentation. gu_info ("set payload size = 1024"); core_test_set_payload_size(1024); prim = gcs_comp_msg_new (true, false, 1, 2, 0); ck_assert(NULL != prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); // node1 and node2 joins. // now node2's status == GCS_NODE_STATE_PRIM ck_assert(!gcs_dummy_set_component(Backend, prim)); ck_assert(!DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); ck_assert(gcs_dummy_inject_msg(Backend, uuid_buf, uuid_len, GCS_MSG_STATE_UUID, 0) == (int)uuid_len); ck_assert(gcs_dummy_inject_msg(Backend, state_buf, state_len, GCS_MSG_STATE_MSG, 0) == (int)state_len); ck_assert(!CORE_RECV_ACT(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf((const gcs_act_cchange_t*)act_r.out, true, 1, 2)); free(act_r.out); // then node3 joins. prim = gcs_comp_msg_new (true, false, 1, 3, 0); ck_assert(NULL != prim); gcs_comp_msg_add(prim, "node1", 0); gcs_comp_msg_add(prim, "node2", 1); gcs_comp_msg_add(prim, "node3", 2); ck_assert(!gcs_dummy_set_component(Backend, prim)); ck_assert(!DUMMY_INJECT_COMPONENT(Backend, prim)); gu_free(prim); // generate a new state uuid. 
gu_uuid_generate(&state_uuid, NULL, 0); memcpy(uuid_buf, &state_uuid, uuid_len); // state exchange message from node3 group = gcs_core_get_group(Core); state_msg = gcs_state_msg_create(&state_uuid, &GU_UUID_NIL, &GU_UUID_NIL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, GCS_SEQNO_ILL, 0, GCS_NODE_STATE_NON_PRIM, GCS_NODE_STATE_PRIM, "node3", "127.0.0.1", group->gcs_proto_ver, group->repl_proto_ver, group->appl_proto_ver, group->prim_gcs_ver, group->prim_repl_ver, group->prim_appl_ver, 0, // desync count 0); size_t state_len3 = gcs_state_msg_len (state_msg); char state_buf3[state_len3]; gcs_state_msg_write (state_buf3, state_msg); gcs_state_msg_destroy (state_msg); // updating state message from node1. group = gcs_core_get_group(Core); state_msg = gcs_group_get_state(group); state_msg->flags = GCS_STATE_FREP | GCS_STATE_FCLA; state_msg->prim_state = GCS_NODE_STATE_JOINED; state_msg->current_state = GCS_NODE_STATE_SYNCED; state_msg->state_uuid = state_uuid; state_msg->name = "node1"; gcs_state_msg_write(state_buf, state_msg); gcs_state_msg_destroy(state_msg); ck_assert(gcs_dummy_inject_msg(Backend, uuid_buf, uuid_len, GCS_MSG_STATE_UUID, 0) == (int)uuid_len); ck_assert(gcs_dummy_inject_msg(Backend, state_buf, state_len, GCS_MSG_STATE_MSG, 0) == (int)state_len); // STR sneaks. // we have to make same message exists in sender queue too. 
// otherwise we will get following log // "FIFO violation: queue empty when local action received" const struct gu_buf act = {act_ptr, (ssize_t)act_size}; action_t act_s(&act, NULL, NULL, act_size, GCS_ACT_STATE_REQ, -1, (gu_thread_t)-1); CORE_SEND_START(&act_s); for(;;) { usleep(10000); gcs_fifo_lite_t* fifo = gcs_core_get_fifo(Core); void* item = gcs_fifo_lite_get_head(fifo); if (item) { gcs_fifo_lite_release(fifo); break; } } ck_assert(gcs_dummy_inject_msg(Backend, msg_buf, msg_size, GCS_MSG_ACTION, 1) == (int)msg_size); ck_assert(gcs_dummy_inject_msg(Backend, state_buf3, state_len3, GCS_MSG_STATE_MSG, 2) == (int)state_len3); // expect STR and id == -EAGAIN. ck_assert(!CORE_RECV_ACT(&act_r, act_ptr, act_size, GCS_ACT_STATE_REQ)); ck_assert(act_r.seqno == -EAGAIN); free(act_r.out); ck_assert(!CORE_RECV_ACT(&act_r, NULL, UNKNOWN_SIZE, GCS_ACT_CCHANGE)); ck_assert(!core_test_check_conf((const gcs_act_cchange_t*)act_r.out, true, 1, 3)); free(act_r.out); // core_test_cleanup(); // ========== gu_free(act_ptr); } END_TEST #endif /* GCS_ALLOW_GH74 */ #if 0 // requires multinode support from gcs_dummy START_TEST (gcs_core_test_foreign) { core_test_init (); core_test_cleanup (); } END_TEST #endif // 0 Suite *gcs_core_suite(void) { Suite *suite = suite_create("GCS core context"); TCase *tcase = tcase_create("gcs_core"); suite_add_tcase (suite, tcase); tcase_set_timeout(tcase, 60); bool skip = false; if (skip == false) { tcase_add_test (tcase, gcs_code_msg); tcase_add_test (tcase, gcs_core_test_api); tcase_add_test (tcase, gcs_core_test_own_v0); tcase_add_test (tcase, gcs_core_test_own_v1); #ifdef GCS_ALLOW_GH74 tcase_add_test (tcase, gcs_core_test_gh74); #endif /* GCS_ALLOW_GH74 */ // tcase_add_test (tcase, gcs_core_test_foreign); } return suite; } galera-4-26.4.25/gcs/src/unit_tests/gcs_core_test.hpp000644 000164 177776 00000000334 15107057155 023614 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_core_test__ #define 
__gcs_core_test__ #include extern Suite *gcs_core_suite(void); #endif /* __gu_core_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_proto_test.hpp000644 000164 177776 00000000340 15107057155 024024 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_proto_test__ #define __gcs_proto_test__ #include extern Suite *gcs_proto_suite(void); #endif /* __gu_proto_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_backend_test.hpp000644 000164 177776 00000000350 15107057155 024251 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2015 Codership Oy * * $Id$ */ #ifndef __gcs_backend_test__ #define __gcs_backend_test__ #include extern Suite *gcs_backend_suite(void); #endif /* __gu_backend_test__ */ galera-4-26.4.25/gcs/src/unit_tests/gcs_test_utils.hpp000644 000164 177776 00000007124 15107057155 024030 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2015-2020 Codership Oy */ #ifndef __gcs_test_utils__ #define __gcs_test_utils__ #include "../gcs_group.hpp" #include "../../../gcache/src/GCache.hpp" namespace gcs_test { class InitConfig { public: InitConfig(gu::Config& cfg); InitConfig(gu::Config& cfg, const std::string& base_name); private: void common_ctor(gu::Config& cfg); }; class GcsGroup { public: GcsGroup(); ~GcsGroup(); void init(const char* node_name, const char* inc_addr, gcs_proto_t gcs_proto_ver, int repl_proto_ver, int appl_proto_ver); struct gcs_group* group() { return group_; } struct gcs_group* operator()(){ return group(); } struct gcs_group* operator->(){ return group_; } gu::Config& config() { return conf_; } gcache::GCache* gcache() { return gcache_; } gcs_group_state_t state() const { return group_->state; } gcs_node_state_t node_state() const { return group_->nodes[group_->my_idx].status; } private: void common_ctor(const char* node_name, const char* inc_addr, gcs_proto_t gver, int rver, int aver); void common_dtor(); gu::Config conf_; InitConfig init_; gcache::GCache* gcache_; gcs_group* 
group_; bool initialized_; }; } /* namespace gcs_test */ struct gt_node { gcs_test::GcsGroup group; char id[GCS_COMP_MEMB_ID_MAX_LEN + 1]; /// ID assigned by the backend explicit gt_node(const char* name = NULL, int gcs_proto_ver = 0); ~gt_node(); gcs_node_state_t state() const { return group.node_state(); } gcs_seqno_t deliver_last_applied(int from, gcs_seqno_t last_applied); }; #define GT_MAX_NODES 10 struct gt_group { struct gt_node* nodes[GT_MAX_NODES]; int nodes_num; int proto_ver; bool primary; explicit gt_group(int num = 0, int gcs_proto_ver = 0, bool prim = true); ~gt_group(); /* deliver new component message to all memebers */ int deliver_component_msg(bool prim); /* perform state exchange between the members */ int perform_state_exchange(); /* add node to group (deliver new component and perform state exchange) * @param new_id should node get new ID? */ int add_node(struct gt_node*, bool new_id); /* NOTE: this function uses simplified and determinitstic algorithm where * dropped node is always replaced by the last one in group. * For our purposes (reproduction of #465) it fits perfectly. 
* @return dropped node handle */ struct gt_node* drop_node(int idx); /* deliver GCS_MSG_SYNC or GCS_MSG_JOIN msg*/ int deliver_join_sync_msg (int src_idx, gcs_msg_type_t type); /* deliver last_applied message from node from */ gcs_seqno_t deliver_last_applied(int from, gcs_seqno_t last_applied); /* @return true if all nodes in the group see node @param idx with a state * @param check */ bool verify_node_state_across(int idx, gcs_node_state_t check) const; /* start SST on behalf of node idx (joiner) * @return donor idx or negative error code */ int sst_start (int joiner_idx, const char* donor_name); /* Finish SST on behalf of a node idx (joiner or donor) */ int sst_finish(int idx); /* join and sync added node (sst_start() + sst_finish()) */ int sync_node(int joiner_idx); }; #endif /* __gu_test_utils__ */ galera-4-26.4.25/gcs/src/gcs_sm.hpp000644 000164 177776 00000037404 15107057155 020053 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2013 Codership Oy * * $Id$ */ /*! * @file GCS Send Monitor. 
To ensure fair (FIFO) access to gcs_core_send() */ #ifndef _gcs_sm_h_ #define _gcs_sm_h_ #include "gu_datetime.hpp" #include #include #ifdef GCS_SM_CONCURRENCY #define GCS_SM_CC sm->cc #else #define GCS_SM_CC 1 #endif /* GCS_SM_CONCURRENCY */ typedef struct gcs_sm_user { gu_cond_t* cond; bool wait; } gcs_sm_user_t; typedef struct gcs_sm_stats { long long sample_start;// beginning of the sample period long long pause_start; // start of the pause long long paused_ns; // total nanoseconds paused long long paused_sample; // paused_ns at the beginning of the sample long long send_q_samples; long long send_q_len; long long send_q_len_max; long long send_q_len_min; } gcs_sm_stats_t; typedef struct gcs_sm { gcs_sm_stats_t stats; gu_mutex_t lock; gu_cond_t cond; long cond_wait; unsigned long wait_q_len; unsigned long wait_q_mask; unsigned long wait_q_head; unsigned long wait_q_tail; long users; long users_min; long users_max; long entered; long ret; #ifdef GCS_SM_CONCURRENCY long cc; #endif /* GCS_SM_CONCURRENCY */ bool pause; gu::datetime::Period wait_time; #ifdef GCS_SM_DEBUG #define GCS_SM_HIST_STR_LEN 128 #define GCS_SM_HIST_LEN 1024 char history[GCS_SM_HIST_LEN][GCS_SM_HIST_STR_LEN]; int history_line; #endif /* GCS_SM_DEBUG */ gcs_sm_user_t wait_q[]; } gcs_sm_t; #ifdef GCS_SM_DEBUG #define GCS_SM_HIST_LOG(fmt, ...) \ { \ sm->history_line = (sm->history_line + 1) % GCS_SM_HIST_LEN; \ char* const line(sm->history[sm->history_line]); \ snprintf(line, GCS_SM_HIST_STR_LEN - 1, \ "%8lx|%s(h:%lu,t:%lu,u:%ld,e:%ld,p:%d,r:%ld):%d: " fmt "\n", \ gu_thread_self(), __func__, \ sm->wait_q_head, sm->wait_q_tail, sm->users, sm->entered, \ sm->pause, sm->ret, __LINE__, ##__VA_ARGS__); \ } /*! 
* Dumps SM state and history to file */ extern void _gcs_sm_dump_state_common(gcs_sm_t* sm, FILE* file); // unprotected #define GCS_SM_ASSERT(expr) \ if (!(expr)) { \ GCS_SM_HIST_LOG("assertion %s failed\n", #expr); \ _gcs_sm_dump_state_common(sm, stderr); \ assert(expr); \ } extern void gcs_sm_dump_state(gcs_sm_t* sm, FILE* file); #else #define GCS_SM_HIST_LOG(fmt, ...) {} #define GCS_SM_ASSERT(expr) assert(expr); #endif /* GCS_SM_DEBUG */ /*! * Creates send monitor * * @param len size of the monitor, should be a power of 2 * @param n concurrency parameter (how many users can enter at the same time) */ extern gcs_sm_t* gcs_sm_create (long len, long n); /*! * Closes monitor for entering and makes all users to exit with error. * (entered users are not affected). Blocks until everybody exits */ extern long gcs_sm_close (gcs_sm_t* sm); /*! * (Re)opens monitor for entering. */ extern long gcs_sm_open (gcs_sm_t* sm); /*! * Deallocates resources associated with the monitor */ extern void gcs_sm_destroy (gcs_sm_t* sm); #define GCS_SM_INCREMENT(cursor) (cursor = ((cursor + 1) & sm->wait_q_mask)) static inline void _gcs_sm_wake_up_next (gcs_sm_t* sm) { long woken = sm->entered; assert (woken >= 0); assert (woken <= GCS_SM_CC); while (woken < GCS_SM_CC && sm->users > 0) { if (gu_likely(sm->wait_q[sm->wait_q_head].wait)) { assert (NULL != sm->wait_q[sm->wait_q_head].cond); // gu_debug ("Waking up %lu", sm->wait_q_head); gu_cond_signal (sm->wait_q[sm->wait_q_head].cond); woken++; GCS_SM_HIST_LOG("signaled %lu", sm->wait_q_head); break; } else { /* skip interrupted */ assert (NULL == sm->wait_q[sm->wait_q_head].cond); gu_debug ("Skipping interrupted: %lu", sm->wait_q_head); sm->users--; if (gu_unlikely(sm->users < sm->users_min)) { sm->users_min = sm->users; } GCS_SM_HIST_LOG("skipped %lu", sm->wait_q_head); GCS_SM_INCREMENT(sm->wait_q_head); } } assert (woken <= GCS_SM_CC); assert (sm->users >= 0); } /* wake up whoever might be waiting there */ static inline void 
_gcs_sm_wake_up_waiters (gcs_sm_t* sm) { if (gu_unlikely(sm->cond_wait)) { assert (sm->cond_wait > 0); sm->cond_wait--; GCS_SM_HIST_LOG("signal global cond"); gu_cond_signal (&sm->cond); } else if (!sm->pause) { _gcs_sm_wake_up_next(sm); } else { /* gcs_sm_continue() will do the rest */ GCS_SM_HIST_LOG("skipped wake up waiters"); } } static inline void _gcs_sm_leave_common (gcs_sm_t* sm) { GCS_SM_ASSERT(sm->users > 0); sm->users--; if (gu_unlikely(sm->users < sm->users_min)) { sm->users_min = sm->users; } GCS_SM_ASSERT(false == sm->wait_q[sm->wait_q_head].wait); GCS_SM_ASSERT(NULL == sm->wait_q[sm->wait_q_head].cond); GCS_SM_INCREMENT(sm->wait_q_head); _gcs_sm_wake_up_waiters (sm); GCS_SM_HIST_LOG("leaving"); } //#define GCS_SM_SIMULATE_TIMEOUTS static inline int _gcs_sm_enqueue_common (gcs_sm_t* sm, gu_cond_t* cond, bool block, unsigned long tail) { sm->wait_q[tail].cond = cond; sm->wait_q[tail].wait = true; int ret; if (block == true) { GCS_SM_HIST_LOG("queueing at %lu", tail); gu_cond_wait (cond, &sm->lock); assert(tail == sm->wait_q_head || false == sm->wait_q[tail].wait); assert(sm->wait_q[tail].cond == cond || false == sm->wait_q[tail].wait); ret = sm->wait_q[tail].wait ? 0 : -EINTR; } else { gu::datetime::Date abstime(gu::datetime::Date::calendar()); #ifdef GCS_SM_SIMULATE_TIMEOUTS if (tail & 1) #endif abstime = abstime + sm->wait_time; struct timespec ts; abstime._timespec(ts); GCS_SM_HIST_LOG("waiting at %lu", tail); ret = -gu_cond_timedwait(cond, &sm->lock, &ts); if (0 == ret) { ret = sm->wait_q[tail].wait ? 0 : -EINTR; // sm->wait_time is incremented by second each time cond wait // times out, reset back to one second when cond wait succeeds. 
sm->wait_time = std::max(sm->wait_time*2/3, gu::datetime::Period(gu::datetime::Sec)); } else if (-ETIMEDOUT == ret) { if (sm->wait_time < 10 * gu::datetime::Sec) { gu_debug("send monitor wait timed out, waited for %s", to_string(sm->wait_time).c_str()); } else { gu_warn("send monitor wait timed out, waited for %s", to_string(sm->wait_time).c_str()); } #ifndef GCS_SM_SIMULATE_TIMEOUTS if (tail & 1) #endif sm->wait_time = sm->wait_time + gu::datetime::Sec; } else { gu_error("send monitor timedwait failed with %d: %s", ret, strerror(-ret)); } // to reproduce GAL-495: if (0 == ret && (tail & 1)) { ret = -EINTR; } } sm->wait_q[tail].cond = NULL; sm->wait_q[tail].wait = false; if (gu_unlikely(0 != ret)) GCS_SM_HIST_LOG("%ld wait failed: %d", tail, ret); return ret; } #ifdef GCS_SM_CONCURRENCY #define GCS_SM_HAS_TO_WAIT \ (sm->users > (sm->entered + 1) || sm->entered >= GCS_SM_CC || sm->pause) #else #define GCS_SM_HAS_TO_WAIT (sm->users > 1 || sm->entered >= GCS_SM_CC || sm->pause) #endif /* GCS_SM_CONCURRENCY */ /*! * Synchronize with entry order to the monitor. 
Must be always followed by * gcs_sm_enter(sm, cond, true) * * @retval -EAGAIN - out of space * @retval -EBADFD - monitor closed * @retval >= 0 queue handle */ static inline long gcs_sm_schedule (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); long ret = sm->ret; if (gu_likely((sm->users < (long)sm->wait_q_len) && (0 == ret))) { sm->users++; if (gu_unlikely(sm->users > sm->users_max)) { sm->users_max = sm->users; } GCS_SM_INCREMENT(sm->wait_q_tail); /* even if we don't queue, cursor * needs to be advanced */ sm->stats.send_q_samples++; if (GCS_SM_HAS_TO_WAIT) { ret = sm->wait_q_tail + 1; // waiter handle /* here we want to distinguish between FC pause and real queue */ sm->stats.send_q_len += sm->users - 1; } GCS_SM_HIST_LOG("scheduled at %lu", sm->wait_q_tail); return ret; // success } else if (0 == ret) { assert (sm->users == (long)sm->wait_q_len); ret = -EAGAIN; } assert(ret < 0); GCS_SM_HIST_LOG("return %ld", sm->wait_q_tail); gu_mutex_unlock (&sm->lock); return ret; } /*! * Enter send monitor critical section * * @param sm send monitor object * @param cond condition to signal to wake up thread in case of wait * @param block if true block until entered or send monitor is closed, * if false enter wait times out eventually * * @retval -EAGAIN - out of space * @retval -EBADFD - monitor closed * @retval -EINTR - was interrupted by another thread * @retval -ETIMEDOUT - timedout waiting for its turn * @retval 0 - successfully entered */ static inline long gcs_sm_enter (gcs_sm_t* sm, gu_cond_t* cond, bool scheduled, bool block) { long ret = 0; /* if scheduled and no queue */ if (gu_likely (scheduled || (ret = gcs_sm_schedule(sm)) >= 0)) { const unsigned long tail(sm->wait_q_tail); /* we want to enqueue at least once, if gcs_sm_schedule() did create a waiter handle (i.e. 
if GCS_SM_HAS_TO_WAIT was true) */ bool wait = GCS_SM_HAS_TO_WAIT; while (wait && ret >= 0) { ret = _gcs_sm_enqueue_common (sm, cond, block, tail); if (gu_likely((0 == ret))) { ret = sm->ret; /* weaken the condition, so that we do enter if there is room for one more thread */ wait = sm->entered >= GCS_SM_CC; } } assert (ret <= 0); if (gu_likely(0 == ret)) { assert(sm->users > 0); assert(sm->entered < GCS_SM_CC); sm->entered++; #ifdef GCS_SM_SIMULATE_TIMEOUTS if (tail & 1) usleep(1000); #endif } else { if (tail != sm->wait_q_head) { /* was interrupted in the middle, * will be handled by someone else (with tail == head) */ } else { GCS_SM_ASSERT(-EINTR != ret || sm->pause); /* update head, wake up next */ _gcs_sm_leave_common(sm); } } GCS_SM_HIST_LOG("%lu entered: %ld", tail, ret); gu_mutex_unlock (&sm->lock); } else if (ret != -EBADFD){ gu_warn("thread %ld failed to schedule for monitor: %ld (%s)", gu_thread_self(), ret, strerror(-ret)); } return ret; } static inline void gcs_sm_leave (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); GCS_SM_ASSERT(sm->entered > 0); sm->entered--; GCS_SM_ASSERT(sm->entered < GCS_SM_CC); _gcs_sm_leave_common(sm); gu_mutex_unlock (&sm->lock); } static inline void gcs_sm_pause (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); /* don't pause closed monitor */ if (gu_likely(0 == sm->ret) && !sm->pause) { sm->stats.pause_start = gu_time_monotonic(); sm->pause = true; } GCS_SM_HIST_LOG("paused"); gu_mutex_unlock (&sm->lock); } static inline void _gcs_sm_continue_common (gcs_sm_t* sm) { sm->pause = false; _gcs_sm_wake_up_next(sm); /* wake up next waiter if any */ } static inline void gcs_sm_continue (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); if (gu_likely(sm->pause)) { _gcs_sm_continue_common (sm); sm->stats.paused_ns += gu_time_monotonic() - sm->stats.pause_start; } else { gu_debug("Trying to continue unpaused monitor"); } GCS_SM_HIST_LOG("resumed"); gu_mutex_unlock 
(&sm->lock); } /*! * Interrupts waiter identified by handle (returned by gcs_sm_schedule()) * * @retval 0 - success * @retval -ESRCH - waiter is not in the queue. For practical purposes * it is impossible to discern already interrupted waiter and * the waiter that has entered the monitor */ static inline long gcs_sm_interrupt (gcs_sm_t* sm, long handle) { assert (handle > 0); long ret; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); handle--; if (gu_likely(sm->wait_q[handle].wait)) { assert (sm->wait_q[handle].cond != NULL); sm->wait_q[handle].wait = false; gu_cond_signal (sm->wait_q[handle].cond); GCS_SM_HIST_LOG("interrupted %ld", handle); sm->wait_q[handle].cond = NULL; ret = 0; if (!sm->pause && handle == (long)sm->wait_q_head) { /* gcs_sm_interrupt() was called right after the waiter was * signaled by gcs_sm_continue() or gcs_sm_leave() but before * the waiter has woken up. Wake up the next waiter */ _gcs_sm_wake_up_next(sm); } } else { ret = -ESRCH; GCS_SM_HIST_LOG("interrupted %ld: not found", handle); } gu_mutex_unlock (&sm->lock); return ret; } /*! * Each call to this function resets stats and starts new sampling interval * * @param q_len current send queue length * @param q_len_avg set to an average number of preceding users seen by each * new one (not including itself) (-1 if stats overflown) * @param q_len_max maximum send queue length since last call * @param q_len_min minimum send queue length since last call * @param paused_ns total time paused (nanoseconds) * @param paused_avg set to a fraction of time which monitor spent in a paused * state (-1 if stats overflown) */ extern void gcs_sm_stats_get (gcs_sm_t* sm, int* q_len, int* q_len_max, int* q_len_min, double* q_len_avg, long long* paused_ns, double* paused_avg); /*! resets average/max/min stats calculation */ extern void gcs_sm_stats_flush(gcs_sm_t* sm); /*! 
Grabs sm object for out-of-order access * @return 0 or negative error code */ static inline long gcs_sm_grab (gcs_sm_t* sm) { long ret; if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); while (!(ret = sm->ret) && sm->entered >= GCS_SM_CC) { sm->cond_wait++; gu_cond_wait (&sm->cond, &sm->lock); } if (ret) { assert (ret < 0); GCS_SM_HIST_LOG("grab failed"); _gcs_sm_wake_up_waiters (sm); } else { assert (sm->entered < GCS_SM_CC); sm->entered++; GCS_SM_HIST_LOG("grab succeeded"); } gu_mutex_unlock (&sm->lock); return ret; } /*! Releases sm object after gcs_sm_grab() */ static inline void gcs_sm_release (gcs_sm_t* sm) { if (gu_unlikely(gu_mutex_lock (&sm->lock))) abort(); sm->entered--; assert(sm->entered >= 0); _gcs_sm_wake_up_waiters (sm); GCS_SM_HIST_LOG("released"); gu_mutex_unlock (&sm->lock); } #endif /* _gcs_sm_h_ */ galera-4-26.4.25/gcs/src/gcs_core.cpp000644 000164 177776 00000151706 15107057155 020361 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2025 Codership Oy * * $Id$ * * * Implementation of the generic communication layer. 
* See gcs_core.h */ #define GCS_COMP_MSG_ACCESS #include "gcs_core.hpp" #include "gcs_backend.hpp" #include "gcs_comp_msg.hpp" #include "gcs_code_msg.hpp" #include "gcs_error.hpp" #include "gcs_fifo_lite.hpp" #include "gcs_group.hpp" #include "gcs_gcache.hpp" #include #include #include #include #include // for mempcpy #include #include using namespace gcs::core; void gcs_core_register(gu::Config& conf) { gcs_group::register_params(conf); if (gcs_backend_register(reinterpret_cast(&conf))) { gu_throw_fatal << "Could not register backend parmeters"; } } const size_t CORE_FIFO_LEN = (1 << 10); // 1024 elements (no need to have more) const size_t CORE_INIT_BUF_SIZE = (1 << 16); // 65K - IP packet size typedef enum core_state { CORE_PRIMARY, CORE_EXCHANGE, CORE_NON_PRIMARY, CORE_CLOSED, CORE_DESTROYED } core_state_t; struct gcs_core { gcs_core(gu::Config& conf, gcache_t* cache, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver, int gcs_proto_ver = GCS_PROTO_MAX); ~gcs_core() noexcept(false); gu_config_t* config; gcache_t* cache; /* group context */ gcs_group_t group; /* connection per se */ long prim_comp_no; core_state_t state; /* protocol */ int proto_ver; /* send part */ gu_mutex_t send_lock; // serves 3 purposes: // 1) serializes access to backend send() call // 2) synchronizes with configuration changes // 3) synchronizes with close() call void* send_buf; size_t send_buf_len; gcs_seqno_t send_act_no; /* recv part */ gcs_recv_msg_t recv_msg; gcs_seqno_t code_msg_buf; /* local action FIFO */ gcs_fifo_lite_t* fifo; /* backend part */ size_t msg_size; gcs_backend_t backend; // message IO context #ifdef GCS_CORE_TESTING gu_lock_step_t ls; // to lock-step in unit tests gu_uuid_t state_uuid; #endif }; // this is to pass local action info from send to recv thread. 
typedef struct core_act { gcs_seqno_t sent_act_id; const void* action; size_t action_size; } core_act_t; typedef struct causal_act { gcs_seqno_t* act_id; gu_uuid_t* act_uuid; long* error; gu_mutex_t* mtx; gu_cond_t* cond; } causal_act_t; gcs_core::gcs_core(gu::Config& conf, gcache_t* cache, const char* node_name, const char* inc_addr, int repl_proto_ver, int appl_proto_ver, int gcs_proto_ver) : config(reinterpret_cast(&conf)), cache(cache), group(conf, cache, node_name, inc_addr, gcs_proto_ver, repl_proto_ver,appl_proto_ver), prim_comp_no(), state(), proto_ver(), send_lock(), send_buf(), send_buf_len(), send_act_no(), recv_msg(), code_msg_buf(), fifo(), msg_size(), backend() #ifdef GCS_CORE_TESTING ,ls() // to lock-step in unit tests ,state_uuid() #endif { auto core(this); // to minimize diff { // Need to allocate something, otherwise Spread 3.17.3 freaks out. core->recv_msg.buf = gu_malloc(CORE_INIT_BUF_SIZE); if (core->recv_msg.buf) { core->recv_msg.buf_len = CORE_INIT_BUF_SIZE; core->send_buf = GU_CALLOC(CORE_INIT_BUF_SIZE, char); if (core->send_buf) { core->send_buf_len = CORE_INIT_BUF_SIZE; core->fifo = gcs_fifo_lite_create (CORE_FIFO_LEN, sizeof (core_act_t)); if (core->fifo) { gu_mutex_init (&core->send_lock, NULL); core->proto_ver = -1; // ^^^ shall be bumped in gcs_group_act_conf() core->state = CORE_CLOSED; core->send_act_no = 1; // 0 == no actions sent #ifdef GCS_CORE_TESTING gu_lock_step_init (&core->ls); core->state_uuid = GU_UUID_NIL; #endif return; // success } gu_free (core->send_buf); } gu_free (core->recv_msg.buf); } } gu_throw_fatal << "Failed to initialize GCS core"; } gcs_core_t* gcs_core_create (gu::Config& conf, gcache_t* const cache, const char* const node_name, const char* const inc_addr, int const repl_proto_ver, int const appl_proto_ver, int const gcs_proto_ver) { try { return new gcs_core(conf, cache, node_name, inc_addr, repl_proto_ver, appl_proto_ver, gcs_proto_ver); } catch (...) 
{ return nullptr; } } long gcs_core_init (gcs_core_t* core, const gu::GTID& position) { if (core->state == CORE_CLOSED) { return gcs_group_init_history (&core->group, position); } else { gu_error ("State must be CLOSED"); if (core->state < CORE_CLOSED) return -EBUSY; else // DESTROYED return -EBADFD; } } long gcs_core_open (gcs_core_t* core, const char* channel, const char* url, bool const bstrap) { long ret; if (core->state != CORE_CLOSED) { gu_debug ("gcs_core->state isn't CLOSED: %d", core->state); return -EBADFD; } if (core->backend.conn) { assert (core->backend.destroy); core->backend.destroy (&core->backend); memset (&core->backend, 0, sizeof(core->backend)); } gu_debug ("Initializing backend IO layer"); if (!(ret = gcs_backend_init (&core->backend, url, core->config))){ assert (NULL != core->backend.conn); if (!(ret = core->backend.open (&core->backend, channel, bstrap))) { gcs_fifo_lite_open (core->fifo); core->state = CORE_NON_PRIMARY; } else { gu_error ("Failed to open backend connection: %ld (%s)", ret, strerror(-ret)); core->backend.destroy (&core->backend); } } else { gu_error ("Failed to initialize backend using '%s': %ld (%s)", url, ret, strerror(-ret)); } return ret; } /* Translates different core states into standard errors */ static inline ssize_t core_error (core_state_t state) { switch (state) { case CORE_EXCHANGE: return -EAGAIN; case CORE_NON_PRIMARY: return -ENOTCONN; case CORE_CLOSED: return -ECONNABORTED; case CORE_DESTROYED: return -EBADFD; default: assert(0); return -ENOTRECOVERABLE; } } /*! * Performs an attempt at sending a message (action fragment) with all * required checks while holding a lock, ensuring exclusive access to backend. * * restart flag may be raised if configuration changes and new nodes are * added - that would require all previous members to resend partially sent * actions. 
*/ static inline ssize_t core_msg_send (gcs_core_t* core, const void* msg, size_t msg_len, gcs_msg_type_t msg_type) { ssize_t ret; if (gu_unlikely(0 != gu_mutex_lock (&core->send_lock))) abort(); { if (gu_likely((CORE_PRIMARY == core->state) || (CORE_EXCHANGE == core->state && GCS_MSG_STATE_MSG == msg_type))) { ret = core->backend.send (&core->backend, msg, msg_len, msg_type); if (ret > 0 && ret != (ssize_t)msg_len && GCS_MSG_ACTION != msg_type) { // could not send message in one piece gu_error ("Failed to send complete message of %s type: " "sent %zd out of %zu bytes.", gcs_msg_type_string[msg_type], ret, msg_len); ret = -EMSGSIZE; } } else { ret = core_error (core->state); if (ret >= 0) { gu_fatal ("GCS internal state inconsistency: " "expected error condition."); abort(); // ret = -ENOTRECOVERABLE; } } } gu_mutex_unlock (&core->send_lock); // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } /*! * Repeats attempt at sending the message if -EAGAIN was returned * by core_msg_send() */ static inline ssize_t core_msg_send_retry (gcs_core_t* core, const void* buf, size_t buf_len, gcs_msg_type_t type) { ssize_t ret; while ((ret = core_msg_send (core, buf, buf_len, type)) == -EAGAIN) { /* wait for primary configuration - sleep 0.01 sec */ gu_debug ("Backend requested wait"); usleep (10000); } // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } ssize_t gcs_core_send (gcs_core_t* const conn, const struct gu_buf* const action, size_t act_size, gcs_act_type_t const act_type) { ssize_t ret = 0; ssize_t sent = 0; gcs_act_frag_t frg; ssize_t send_size; const unsigned char proto_ver = conn->proto_ver; const ssize_t hdr_size = gcs_act_proto_hdr_size (proto_ver); core_act_t* local_act; assert (action != NULL); assert (act_size > 0); /* * Action header will be replicated with every message. * It may seem like an extra overhead, but it is tiny * so far and simplifies A LOT. 
*/ /* Initialize action constants */ frg.act_size = act_size; frg.act_type = act_type; frg.act_id = conn->send_act_no; /* incremented for every new action */ frg.frag_no = 0; frg.proto_ver = proto_ver; if ((ret = gcs_act_proto_write (&frg, conn->send_buf, conn->send_buf_len))) return ret; if ((local_act = (core_act_t*)gcs_fifo_lite_get_tail (conn->fifo))) { *local_act = (core_act_t){ conn->send_act_no, action, act_size }; gcs_fifo_lite_push_tail (conn->fifo); } else { ret = core_error (conn->state); gu_error ("Failed to access core FIFO: %zd (%s)", ret, strerror (-ret)); return ret; } int idx = 0; const uint8_t* ptr = (const uint8_t*)action[idx].ptr; ssize_t left = action[idx].size; do { const size_t chunk_size = act_size < frg.frag_len ? act_size : frg.frag_len; /* Here is the only time we have to cast frg.frag */ char* dst = (char*)frg.frag; ssize_t to_copy = chunk_size; while (to_copy > 0) { // gather action bufs into one if (to_copy <= left) { assert(to_copy > 0); memcpy (dst, ptr, to_copy); ptr += to_copy; left -= to_copy; to_copy = 0; } else { assert(left >= 0); memcpy (dst, ptr, left); dst += left; to_copy -= left; idx++; ptr = (const uint8_t*)action[idx].ptr; left = action[idx].size; } } send_size = hdr_size + chunk_size; #ifdef GCS_CORE_TESTING gu_lock_step_wait (&conn->ls); // pause after every fragment gu_info ("Sent %p of size %zu. Total sent: %zu, left: %zu", (char*)conn->send_buf + hdr_size, chunk_size, sent, act_size); #endif ret = core_msg_send_retry (conn, conn->send_buf, send_size, GCS_MSG_ACTION); GU_DBUG_SYNC_WAIT("gcs_core_after_frag_send"); #ifdef GCS_CORE_TESTING // gu_lock_step_wait (&conn->ls); // pause after every fragment // gu_info ("Sent %p of size %zu, ret: %zd. 
Total sent: %zu, left: %zu", // conn->send_buf + hdr_size, chunk_size, ret, sent, act_size); #endif if (gu_likely(ret > hdr_size)) { assert (ret <= send_size); ret -= hdr_size; sent += ret; act_size -= ret; if (gu_unlikely((size_t)ret < chunk_size)) { /* Could not send all that was copied: */ /* 1. adjust frag_len, don't copy more than we could send */ frg.frag_len = ret; /* 2. move ptr back to point at the first unsent byte */ size_t move_back = chunk_size - ret; size_t ptrdiff = ptr - (uint8_t*)action[idx].ptr; do { if (move_back <= ptrdiff) { ptr -= move_back; assert((size_t)action[idx].size > ptrdiff + move_back); left = action[idx].size - ptrdiff + move_back; break; } else { assert (idx > 0); move_back -= ptrdiff; idx--; ptrdiff = action[idx].size; ptr = (uint8_t*)action[idx].ptr + ptrdiff; } } while (true); } } else { if (ret >= 0) { // we managed to send less than a header, fail gu_fatal ("Cannot send message: header is too big"); ret = -ENOTRECOVERABLE; } /* At this point we have an unsent action in local FIFO * and parts of this action already could have been received * by other group members. * (first parts of action might be even received by this node, * so that there is nothing to remove, but we cannot know for sure) * * 1. Action will never be received completely by this node. Hence * action must be removed from fifo on behalf of sending thr.: */ gcs_fifo_lite_remove (conn->fifo); /* 2. Members will have to discard received fragments. * Two reasons could lead us here: new member(s) in configuration * change or broken connection (leave group). In both cases other * members discard fragments */ goto out; } } while (act_size && gcs_act_proto_inc(conn->send_buf)); assert (0 == act_size); /* successfully sent action, increment send counter */ conn->send_act_no++; ret = sent; out: // gu_debug ("returning: %d (%s)", ret, strerror(-ret)); return ret; } /* A helper for gcs_core_recv(). 
* Deals with fetching complete message from backend * and reallocates recv buf if needed */ static inline long core_msg_recv (gcs_backend_t* backend, gcs_recv_msg_t* recv_msg, long long timeout) { long ret; ret = backend->recv (backend, recv_msg, timeout); assert(recv_msg->buf || 0 == recv_msg->buf_len); while (gu_unlikely(ret > recv_msg->buf_len)) { /* recv_buf too small, reallocate */ /* sometimes - like in case of component message, we may need to * do reallocation 2 times. This should be fixed in backend */ void* msg = gu_realloc (recv_msg->buf, ret); gu_debug ("Reallocating buffer from %d to %ld bytes", recv_msg->buf_len, ret); if (msg) { /* try again */ recv_msg->buf = msg; recv_msg->buf_len = ret; ret = backend->recv (backend, recv_msg, timeout); /* should be either an error or an exact match */ assert ((ret < 0) || (ret >= recv_msg->buf_len)); } else { /* realloc unsuccessfull, old recv_buf remains */ gu_error ("Failed to reallocate buffer to %ld bytes", ret); ret = -ENOMEM; break; } } assert(recv_msg->buf); if (gu_unlikely(ret < 0)) { gu_debug ("returning %ld: %s\n", ret, strerror(-ret)); } return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_ACTION. * * @return action size, negative error code or 0 to continue. 
*/ static inline ssize_t core_handle_act_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act_rcvd* act) { ssize_t ret = -1; gcs_group_t* group = &core->group; gcs_act_frag_t frg; bool my_msg = (gcs_group_my_idx(group) == msg->sender_idx); bool commonly_supported_version = true; assert (GCS_MSG_ACTION == msg->type); if ((CORE_PRIMARY == core->state) || my_msg){//should always handle own msgs if (gu_unlikely(gcs_act_proto_ver(msg->buf) != gcs_core_proto_ver(core))) { gu_info ("Message with protocol version %d != highest commonly " "supported: %d.", gcs_act_proto_ver(msg->buf), gcs_core_proto_ver(core)); commonly_supported_version = false; if (!my_msg) { gu_info ("Discard message from member %d because of " "not commonly supported version.", msg->sender_idx); return 0; } else { gu_info ("Resend message because of " "not commonly supported version."); } } ret = gcs_act_proto_read (&frg, msg->buf, msg->size); if (gu_unlikely(ret)) { gu_fatal ("Error parsing action fragment header: %zd (%s).", ret, strerror (-ret)); assert (0); return -ENOTRECOVERABLE; } ret = gcs_group_handle_act_msg (group, &frg, msg, act, commonly_supported_version); if (ret > 0) { /* complete action received */ assert (act->act.buf_len == ret); #ifndef GCS_FOR_GARB assert (NULL != act->act.buf); #else assert (NULL == act->act.buf); #endif assert(act->sender_idx == msg->sender_idx); if (gu_likely(!my_msg)) { /* foreign action, must be passed from gcs_group */ assert (GCS_ACT_WRITESET != act->act.type || act->id > 0); } else { /* local action, get from FIFO, should be there already */ core_act_t* local_act; gcs_seqno_t sent_act_id; if ((local_act = (core_act_t*)gcs_fifo_lite_get_head ( core->fifo))){ act->local = (const struct gu_buf*)local_act->action; act->act.buf_len = local_act->action_size; sent_act_id = local_act->sent_act_id; gcs_fifo_lite_pop_head (core->fifo); assert (NULL != act->local); /* NOTE! 
local_act cannot be used after this point */ /* sanity check */ if (gu_unlikely(sent_act_id != frg.act_id)) { gu_fatal("FIFO violation: expected sent_act_id %" PRId64 " " "found %" PRId64, sent_act_id, frg.act_id); ret = -ENOTRECOVERABLE; } if (gu_unlikely(act->act.buf_len != ret)) { gu_fatal ("Send/recv action size mismatch: %zd/%zd", act->act.buf_len, ret); ret = -ENOTRECOVERABLE; } } else { gu_fatal ("FIFO violation: queue empty when local action " "received"); ret = -ENOTRECOVERABLE; } assert (act->id < 0 || CORE_PRIMARY == core->state); if (gu_unlikely(CORE_PRIMARY != core->state)) { // there can be a tiny race with gcs_core_close(), // so CORE_CLOSED allows TO delivery. assert (act->id < 0 /*#275|| CORE_CLOSED == core->state*/); if (act->id < 0) act->id = core_error (core->state); } } if (gu_unlikely(GCS_ACT_STATE_REQ == act->act.type && ret > 0 && // note: #gh74. // if lingering STR sneaks in when core->state != CORE_PRIMARY // act->id != GCS_SEQNO_ILL (most likely act->id == -EAGAIN) core->state == CORE_PRIMARY)) { #ifdef GCS_FOR_GARB /* ignoring state requests from other nodes (not allocated) */ if (my_msg) { if (act->act.buf_len != act->local[0].size) { gu_fatal ("Protocol violation: state request is fragmented." 
" Aborting."); abort(); } act->act.buf = act->local[0].ptr; #endif ret = gcs_group_handle_state_request (group, act); assert (ret <= 0 || ret == act->act.buf_len); #ifdef GCS_FOR_GARB if (ret < 0) gu_fatal ("Handling state request failed: %zd",ret); act->act.buf = NULL; } else { act->act.buf_len = 0; act->act.type = GCS_ACT_ERROR; act->id = GCS_SEQNO_ILL; act->sender_idx = -1; ret = 0; } #endif } // gu_debug ("Received action: seqno: %lld, sender: %d, size: %d, " // "act: %p", act->id, msg->sender_idx, ret, act->buf); // gu_debug ("%s", (char*) act->buf); } else if (gu_unlikely(ret < 0)){ gu_fatal ("Failed to handle action fragment: %zd (%s)", ret, strerror(-ret)); return -ENOTRECOVERABLE; } } else { /* Non-primary conf, foreign message - ignore */ gu_info ("Action message in non-primary configuration from " "member %d", msg->sender_idx); ret = 0; } #ifndef NDEBUG if (ret <= 0) { assert (GCS_SEQNO_ILL == act->id); assert (GCS_ACT_ERROR == act->act.type); } #endif return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_LAST. * * @return action size, negative error code or 0 to continue. */ static ssize_t core_handle_last_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act* act) { assert(GCS_MSG_LAST == msg->type); assert(CodeMsg::serial_size() >= msg->size); assert(int(sizeof(uint64_t)) <= msg->size); if (gu_likely(gcs_group_is_primary(&core->group))) { gcs_seqno_t const commit_cut (gcs_group_handle_last_msg(&core->group, msg)); if (0 != commit_cut) { /* commit cut changed */ int const buf_len(sizeof(uint64_t)); void* const buf(malloc(buf_len)); if (gu_likely(NULL != (buf))) { /* #701 - everything that goes into the action buffer * is expected to be serialized. 
*/ gu::serialize8(commit_cut, buf, buf_len, 0); assert(NULL == act->buf); act->buf = buf; act->buf_len = buf_len; act->type = GCS_ACT_COMMIT_CUT; return act->buf_len; } else { gu_fatal ("Out of memory for GCS_ACT_COMMIT_CUT"); return -ENOMEM; } } } else { /* Non-primary - ignore last message */ gu_warn ("Last Applied Action message " "in non-primary configuration from member %d", msg->sender_idx); } return 0; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_LAST. * * @return action size, negative error code or 0 to continue. */ static int core_handle_vote_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act* act) { assert (GCS_MSG_VOTE == msg->type); assert (CodeMsg::serial_size() <= msg->size); VoteResult const res(gcs_group_handle_vote_msg(&core->group, msg)); if (res.seqno != GCS_SEQNO_ILL) { assert(res.seqno > 0); /* voting complete or vote request */ int const buf_len(2 * sizeof(uint64_t)); void* const buf(malloc(buf_len)); if (gu_likely(NULL != (buf))) { gu::serialize8(res.seqno, buf, buf_len, 0); gu::serialize8(res.res, buf, buf_len, 8); assert(NULL == act->buf); act->buf = buf; act->buf_len = buf_len; act->type = GCS_ACT_VOTE; return act->buf_len; } else { gu_fatal ("Out of memory for GCS_ACT_VOTE"); return -ENOMEM; } } return 0; } /*! Common things to do on detected inconsistency */ static int core_handle_inconsistency(gcs_core_t* core, struct gcs_act* act) { core->state = CORE_NON_PRIMARY; act->buf = NULL; act->buf_len = 0; act->type = GCS_ACT_INCONSISTENCY; return -ENOTRECOVERABLE; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_COMPONENT. * * @return action size, negative error code or 0 to continue. 
*/ static ssize_t core_handle_comp_msg (gcs_core_t* const core, struct gcs_recv_msg* const msg, struct gcs_act_rcvd* const rcvd) { ssize_t ret(0); gcs_group_t* const group(&core->group); struct gcs_act* const act(&rcvd->act); assert (GCS_MSG_COMPONENT == msg->type); if (msg->size < (ssize_t)sizeof(gcs_comp_msg_t)) { gu_error ("Malformed component message (size %d < %zu). Ignoring", msg->size, sizeof(gcs_comp_msg_t)); return 0; } if (gu_mutex_lock (&core->send_lock)) abort(); ret = gcs_group_handle_comp_msg (group, (const gcs_comp_msg_t*)msg->buf); switch (ret) { case GCS_GROUP_PRIMARY: /* New primary configuration. This happens if: * - this is first node in group OR * - some nodes disappeared no new nodes appeared * No need for state exchange, return new conf_act right away */ assert (CORE_EXCHANGE != core->state); if (CORE_NON_PRIMARY == core->state) core->state = CORE_PRIMARY; ret = gcs_group_act_conf (group, rcvd, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create PRIM CONF action: %zd (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } assert (ret == act->buf_len); break; case GCS_GROUP_WAIT_STATE_UUID: /* New members, need state exchange. If representative, send UUID */ // if state is CLOSED or DESTROYED we don't do anything if (CORE_CLOSED > core->state) { if (0 == gcs_group_my_idx(group)) { // I'm representative gu_uuid_t uuid; gu_uuid_generate (&uuid, NULL, 0); #ifdef GCS_CORE_TESTING if (gu_uuid_compare(&core->state_uuid, &GU_UUID_NIL)) { uuid = core->state_uuid; } #endif ret = core->backend.send (&core->backend, &uuid, sizeof(uuid), GCS_MSG_STATE_UUID); if (ret < 0) { // if send() failed, it means new configuration change // is on the way. Probably should ignore. 
switch (-ret) { case EAGAIN: gu_info("Temporary failure in sending state UUID, " "will try again in next primary component"); break; case ENOTCONN: gu_info("Failed to send state UUID: Connection to " "cluster was closed"); break; default: gu_warn("Failed to send state UUID: %zd (%s)", ret, gcs_error_str(-ret)); break; } } else { gu_info ("STATE_EXCHANGE: sent state UUID: " GU_UUID_FORMAT, GU_UUID_ARGS(&uuid)); } } else { gu_info ("STATE EXCHANGE: Waiting for state UUID."); } core->state = CORE_EXCHANGE; } ret = 0; // no action to return, continue break; case GCS_GROUP_NON_PRIMARY: /* Lost primary component */ if (core->state < CORE_CLOSED) { if (gcs_group_my_idx(group) == -1) { // self-leave gcs_fifo_lite_close (core->fifo); core->state = CORE_CLOSED; ret = -gcs_comp_msg_error((const gcs_comp_msg_t*)msg->buf); if (ret < 0) { assert(act->buf == NULL); assert(act->buf_len == 0); act->type = GCS_ACT_ERROR; gu_debug("comp msg error in core %zd", -ret); } } else { // regular non-prim core->state = CORE_NON_PRIMARY; } if (GCS_GROUP_NON_PRIMARY == ret) { // no error in comp msg ret = gcs_group_act_conf (group, rcvd, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create NON-PRIM CONF action: %zd (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } } } else { // ignore in production? assert(0); } assert (ret == act->buf_len || ret < 0); break; case GCS_GROUP_INCONSISTENT: ret = core_handle_inconsistency(core, act); break; case GCS_GROUP_WAIT_STATE_MSG: gu_fatal ("Internal error: gcs_group_handle_comp() returned " "WAIT_STATE_MSG. Can't continue."); ret = -ENOTRECOVERABLE; assert(0); // fall through default: gu_fatal ("Failed to handle component message: %zd (%s)!", ret, strerror (-ret)); assert(0); } gu_mutex_unlock (&core->send_lock); return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_STATE_UUID. * * @return negative error code or 0 to continue. 
*/ static ssize_t core_handle_uuid_msg (gcs_core_t* core, gcs_recv_msg_t* msg) { ssize_t ret = 0; gcs_group_t* group = &core->group; assert (GCS_MSG_STATE_UUID == msg->type); if (GCS_GROUP_WAIT_STATE_UUID == gcs_group_state (group)) { ret = gcs_group_handle_uuid_msg (group, msg); switch (ret) { case GCS_GROUP_WAIT_STATE_MSG: // Need to send state message for state exchange { gcs_state_msg_t* state = gcs_group_get_state (group); if (state) { size_t state_len = gcs_state_msg_len (state); uint8_t state_buf[state_len]; const gu_uuid_t* state_uuid = gcs_state_msg_uuid (state); gcs_state_msg_write (state_buf, state); ret = core_msg_send_retry (core, state_buf, state_len, GCS_MSG_STATE_MSG); if (ret > 0) { gu_info ("STATE EXCHANGE: sent state msg: " GU_UUID_FORMAT, GU_UUID_ARGS(state_uuid)); } else { // This may happen if new configuraiton chage goes on. // What shall we do in this case? Is it unrecoverable? gu_error ("STATE EXCHANGE: failed for: " GU_UUID_FORMAT ": %zd (%s)", GU_UUID_ARGS(state_uuid), ret, strerror(-ret)); } gcs_state_msg_destroy (state); } else { gu_fatal ("Failed to allocate state object."); ret = -ENOTRECOVERABLE; } } break; case GCS_GROUP_WAIT_STATE_UUID: // In case of stray state uuid message break; default: assert(ret < 0); gu_error ("Failed to handle state UUID: %zd (%s)", ret, strerror (-ret)); } } return ret; } /*! * Helper for gcs_core_recv(). Handles GCS_MSG_STATE_MSG. * * @return action size, negative error code or 0 to continue. 
*/ static ssize_t core_handle_state_msg (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act_rcvd* rcvd) { ssize_t ret(0); gcs_group_t* const group(&core->group); assert (GCS_MSG_STATE_MSG == msg->type); if (GCS_GROUP_WAIT_STATE_MSG == gcs_group_state (group)) { if (gu_mutex_lock (&core->send_lock)) abort(); // cast to int is needed to distinguish between positive enums and // negative error codes ret = int(gcs_group_handle_state_msg (group, msg)); switch (ret) { case GCS_GROUP_PRIMARY: case GCS_GROUP_NON_PRIMARY: // state exchange is over, create configuration action // if core is closing we do nothing if (CORE_CLOSED > core->state) { assert (CORE_EXCHANGE == core->state); switch (ret) { case GCS_GROUP_PRIMARY: core->state = CORE_PRIMARY; break; case GCS_GROUP_NON_PRIMARY: core->state = CORE_NON_PRIMARY; break; default: assert (0); } } ret = gcs_group_act_conf (group, rcvd, &core->proto_ver); if (ret < 0) { gu_fatal ("Failed create CONF action: %zd (%s)", ret, strerror (-ret)); assert (0); ret = -ENOTRECOVERABLE; } assert (ret == rcvd->act.buf_len); break; case GCS_GROUP_WAIT_STATE_MSG: // waiting for more state messages ret = 0; break; case GCS_GROUP_INCONSISTENT: ret = core_handle_inconsistency(core, &rcvd->act); break; default: assert (ret < 0); gu_error ("Failed to handle state message: %zd (%s)", ret, strerror (-ret)); } gu_mutex_unlock (&core->send_lock); } return ret; } /* returns code in serialized form */ static gcs_seqno_t core_msg_code (const struct gcs_recv_msg* const msg, int const proto_ver) { if (gu_likely(proto_ver >= 1 && msg->size == gcs::core::CodeMsg::serial_size())) { const gcs::core::CodeMsg* const cm (static_cast(msg->buf)); return gu::htog(cm->code()); } else if (proto_ver == 0 && msg->size == sizeof(gcs_seqno_t)) { return *(static_cast(msg->buf)); // no deserialization } else { log_warn << "Bogus code message size: " << msg->size; assert(0); return gu::htog(gcs_seqno_t(-EINVAL)); } } /*! 
* Some service actions are for internal use and consist of a single message * (FLOW, JOIN, SYNC) * In this case we can simply use msg->buf as an action buffer, since we * can guarantee that we don't deallocate it. Action here is just a wrapper * to deliver message to the upper level. */ static int core_msg_to_action (gcs_core_t* core, struct gcs_recv_msg* msg, struct gcs_act_rcvd* rcvd) { int ret = 0; gcs_group_t* group = &core->group; struct gcs_act* const act(&rcvd->act); if (GCS_GROUP_PRIMARY == gcs_group_state (group)) { switch (msg->type) { case GCS_MSG_FLOW: // most frequent ret = 1; act->type = GCS_ACT_FLOW; act->buf = msg->buf; act->buf_len = msg->size; break; case GCS_MSG_JOIN: ret = gcs_group_handle_join_msg (group, msg); assert (gcs_group_my_idx(group) == msg->sender_idx || 0 >= ret); if (-ENOTRECOVERABLE == ret) { core->backend.close(&core->backend); // See #165. // There is nobody to pass this error to for graceful shutdown: // application thread is blocked waiting for SST. // Also note that original ret value is not preserved on return // so this must be done here. gu_abort(); } else if (ret != 0) { core->code_msg_buf = core_msg_code(msg, core->proto_ver); act->type = GCS_ACT_JOIN; act->buf = &core->code_msg_buf; act->buf_len = sizeof(core->code_msg_buf); } break; case GCS_MSG_SYNC: ret = gcs_group_handle_sync_msg (group, msg); if (gu_likely(ret != 0)) { core->code_msg_buf = core_msg_code(msg, core->proto_ver); act->type = GCS_ACT_SYNC; act->buf = &core->code_msg_buf; act->buf_len = sizeof(core->code_msg_buf); } break; default: gu_error ("Iternal error. Unexpected message type %s from %d", gcs_msg_type_string[msg->type], msg->sender_idx); assert (0); ret = -EPROTO; } if (ret != 0) { if (ret > 0) rcvd->id = 0; else if (ret < 0) rcvd->id = ret; ret = act->buf_len; } } else { /* Messages which were sent just before cluster partitioning may * be delivered in the following non-primary configuration. This * is expected behavior, so info log level is enough. 
*/ gu_info ("%s message from member %d in non-primary configuration. " "Ignored.", gcs_msg_type_string[msg->type], msg->sender_idx); } return ret; } static long core_msg_causal(gcs_core_t* conn, struct gcs_recv_msg* msg) { if (gu_unlikely(msg->size != sizeof(causal_act_t))) { gu_error("invalid causal act len %d, expected %zu", msg->size, sizeof(causal_act_t)); return -EPROTO; } causal_act_t* act = (causal_act_t*)msg->buf; gu_mutex_lock(act->mtx); { switch (conn->group.state) { case GCS_GROUP_PRIMARY: *act->act_id = conn->group.act_id_; *act->act_uuid = conn->group.group_uuid; break; case GCS_GROUP_WAIT_STATE_UUID: case GCS_GROUP_WAIT_STATE_MSG: *act->error = -EAGAIN; break; default: *act->error = -EPERM; } gu_cond_signal(act->cond); } gu_mutex_unlock(act->mtx); return msg->size; } /*! Receives action */ ssize_t gcs_core_recv (gcs_core_t* conn, struct gcs_act_rcvd* recv_act, long long timeout) { struct gcs_recv_msg* const recv_msg(&conn->recv_msg); ssize_t ret(0); static struct gcs_act_rcvd zero_act( gcs_act(NULL, 0, GCS_ACT_ERROR), NULL, GCS_SEQNO_ILL, -1); *recv_act = zero_act; /* receive messages from group and demultiplex them * until finally some complete action is ready */ do { assert (recv_act->act.buf == NULL); assert (recv_act->act.buf_len == 0); assert (recv_act->act.type == GCS_ACT_ERROR); assert (recv_act->id == GCS_SEQNO_ILL); assert (recv_act->sender_idx == -1); ret = core_msg_recv (&conn->backend, recv_msg, timeout); if (gu_unlikely (ret <= 0)) { goto out; /* backend error while receiving message */ } assert(recv_msg->buf); assert(recv_msg->buf_len >= recv_msg->size); switch (recv_msg->type) { case GCS_MSG_ACTION: ret = core_handle_act_msg(conn, recv_msg, recv_act); assert (ret == recv_act->act.buf_len || ret <= 0); break; case GCS_MSG_LAST: ret = core_handle_last_msg(conn, recv_msg, &recv_act->act); assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len); break; case GCS_MSG_COMPONENT: ret = core_handle_comp_msg (conn, 
recv_msg, recv_act); // assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len || ret < 0); break; case GCS_MSG_STATE_UUID: ret = core_handle_uuid_msg (conn, recv_msg); // assert (ret >= 0); // hang on error in debug mode ret = 0; // continue waiting for state messages break; case GCS_MSG_STATE_MSG: ret = core_handle_state_msg (conn, recv_msg, recv_act); // assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len || ret < 0); break; case GCS_MSG_JOIN: case GCS_MSG_SYNC: case GCS_MSG_FLOW: ret = core_msg_to_action (conn, recv_msg, recv_act); assert (ret == recv_act->act.buf_len || ret <= 0); break; case GCS_MSG_VOTE: ret = core_handle_vote_msg(conn, recv_msg, &recv_act->act); assert (ret >= 0); // hang on error in debug mode assert (ret == recv_act->act.buf_len); break; case GCS_MSG_CAUSAL: ret = core_msg_causal(conn, recv_msg); assert(recv_msg->sender_idx == gcs_group_my_idx(&conn->group)); assert(ret == recv_msg->size || ret <= 0); ret = 0; // continue waiting for messages break; default: // this normaly should not happen, shall we bother with // protection? 
gu_warn ("Received unsupported message type: %d, length: %d, " "sender: %d", recv_msg->type, recv_msg->size, recv_msg->sender_idx); // continue looping } } while (0 == ret); /* end of recv loop */ out: assert (ret || GCS_ACT_ERROR == recv_act->act.type); assert (ret == recv_act->act.buf_len || ret < 0); assert (recv_act->id <= 0 || recv_act->act.type == GCS_ACT_WRITESET || recv_act->act.type == GCS_ACT_CCHANGE || recv_act->act.type == GCS_ACT_STATE_REQ); // <- dirty hack assert (recv_act->sender_idx >= 0 || recv_act->act.type != GCS_ACT_WRITESET); // gu_debug ("Returning %d", ret); if (gu_unlikely(ret < 0)) { assert (recv_act->id < 0); assert (GCS_ACT_CCHANGE != recv_act->act.type); if (GCS_ACT_WRITESET == recv_act->act.type && recv_act->act.buf) { gcs_gcache_free (conn->cache, recv_act->act.buf); recv_act->act.buf = NULL; } if (-ENOTRECOVERABLE == ret) { conn->backend.close(&conn->backend); if (GCS_ACT_INCONSISTENCY != recv_act->act.type) { /* inconsistency event must be passed up */ gu_fatal("Unrecoverable error happened above. 
Aborting..."); usleep(1000000); // give it a second gu_abort(); } } } return ret; } long gcs_core_close (gcs_core_t* core) { long ret; if (!core) return -EBADFD; if (gu_mutex_lock (&core->send_lock)) return -EBADFD; if (core->state >= CORE_CLOSED) { ret = -EBADFD; } else { ret = core->backend.close (&core->backend); } gu_mutex_unlock (&core->send_lock); return ret; } static int core_destroy(gcs_core_t* core) { core_act_t* tmp; if (gu_mutex_lock (&core->send_lock)) return -EBADFD; { if (CORE_CLOSED != core->state) { if (core->state < CORE_CLOSED) gu_error ("Calling destroy() before close()."); gu_mutex_unlock (&core->send_lock); return -EBADFD; } if (core->backend.conn) { gu_debug ("Calling backend.destroy()"); core->backend.destroy (&core->backend); } core->state = CORE_DESTROYED; } gu_mutex_unlock (&core->send_lock); /* at this point all send attempts are isolated */ /* after that we must be able to destroy mutexes */ while (gu_mutex_destroy (&core->send_lock)); /* now noone will interfere */ while ((tmp = (core_act_t*)gcs_fifo_lite_get_head (core->fifo))) { // whatever is in tmp.action is allocated by app., just forget it. gcs_fifo_lite_pop_head (core->fifo); } gcs_fifo_lite_destroy (core->fifo); /* free buffers */ gu_free (core->recv_msg.buf); gu_free (core->send_buf); #ifdef GCS_CORE_TESTING gu_lock_step_destroy (&core->ls); #endif return 0; } gcs_core::~gcs_core() noexcept(false) { int const ret(core_destroy(this)); if (ret) { gu_error("GCS core destructor failed %d (%s)", ret, strerror(ret)); gu_abort(); } } long gcs_core_destroy (gcs_core_t* core) { try { delete core; return 0; } catch (...) 
{ return -1; } } int gcs_core_proto_ver (const gcs_core_t* conn) { return conn->proto_ver; } int gcs_core_set_pkt_size (gcs_core_t* core, int const pkt_size) { if (core->state >= CORE_CLOSED) { gu_error ("Attempt to set packet size on a closed connection."); return -EBADFD; } int const hdr_size(gcs_act_proto_hdr_size(core->proto_ver)); if (hdr_size < 0) return hdr_size; int const min_msg_size(hdr_size + 1); int msg_size(core->backend.msg_size(&core->backend, pkt_size)); if (msg_size < min_msg_size) { gu_warn ("Requested packet size %d is too small, " "using smallest possible: %d", pkt_size, pkt_size + (min_msg_size - msg_size)); msg_size = min_msg_size; } /* even if backend may not support limiting packet size force max message * size at this level */ msg_size = std::min(std::max(min_msg_size, pkt_size), msg_size); gu_info ("Changing maximum packet size to %d, resulting msg size: %d", pkt_size, msg_size); int ret(msg_size - hdr_size); // message payload assert(ret > 0); if (gu_mutex_lock (&core->send_lock)) abort(); { if (core->send_buf_len != (size_t)msg_size) { if (core->state != CORE_DESTROYED) { void* new_send_buf(gu_realloc(core->send_buf, msg_size)); if (new_send_buf) { core->send_buf = new_send_buf; core->send_buf_len = msg_size; memset (core->send_buf, 0, hdr_size); // to pacify valgrind gu_debug ("Message payload (action fragment size): %d", ret); } else { ret = -ENOMEM; } } else { ret = -EBADFD; } } } gu_mutex_unlock (&core->send_lock); return ret; } static inline ssize_t core_send_seqno (gcs_core_t* core, gcs_seqno_t seqno, gcs_msg_type_t msg_type) { gcs_seqno_t const htogs = gcs_seqno_htog (seqno); ssize_t ret = core_msg_send_retry (core, &htogs, sizeof(htogs), msg_type); if (ret > 0) { assert(ret == sizeof(seqno)); } return ret; } static inline int core_send_code (gcs_core_t* const core, const gu::GTID& gtid, int64_t code, gcs_msg_type_t const msg_type) { if (gu_unlikely(core->proto_ver < 1)) { return core_send_seqno (core, code < 0 ? 
code : gtid.seqno(), msg_type); } CodeMsg const msg(gtid, code); assert(msg.uuid() != GU_UUID_NIL); int ret(core_msg_send_retry (core, msg(), msg.serial_size(), msg_type)); if (ret > 0) { assert(ret == msg.serial_size()); } return ret; } int gcs_core_set_last_applied (gcs_core_t* const core, const gu::GTID& gtid) { return core_send_code (core, gtid, 0, GCS_MSG_LAST); } int gcs_core_send_join (gcs_core_t* const core, const gu::GTID& gtid, int code) { return core_send_code (core, gtid, code, GCS_MSG_JOIN); } int gcs_core_send_sync (gcs_core_t* const core, const gu::GTID& gtid) { return core_send_code (core, gtid, 0, GCS_MSG_SYNC); } int gcs_core_send_vote (gcs_core_t* const core, const gu::GTID& gtid, int64_t code, const void* data, size_t const data_len) { #if 0 // simple code message return core_send_code (core, gtid, code, GCS_MSG_VOTE); #else CodeMsg const cmsg(gtid, code); assert(cmsg.uuid() != GU_UUID_NIL); int const cmsg_size(cmsg.serial_size()); char vmsg[1024] = { 0, }; // try to fit in one ethernet frame assert(cmsg_size < int(sizeof(vmsg))); ::memcpy(&vmsg[0], cmsg(), cmsg_size); int copy_size(int(sizeof(vmsg)) - cmsg_size - 1); // allow for trailing 0 assert(copy_size >= 0); if (size_t(copy_size) > data_len) copy_size = data_len; ::memcpy(&vmsg[cmsg_size], data, copy_size); int const vmsg_size(cmsg_size + copy_size + 1); int ret(core_msg_send_retry(core, &vmsg[0], vmsg_size, GCS_MSG_VOTE)); if (ret > 0) { assert(ret >= cmsg_size); } return ret; #endif } ssize_t gcs_core_send_fc (gcs_core_t* core, const void* const fc, size_t const fc_size) { ssize_t ret; ret = core_msg_send_retry (core, fc, fc_size, GCS_MSG_FLOW); if (ret == (ssize_t)fc_size) { ret = 0; } return ret; } long gcs_core_caused (gcs_core_t* core, gu::GTID& gtid) { long error = 0; gcs_seqno_t act_id = GCS_SEQNO_ILL; gu_uuid_t act_uuid = GU_UUID_NIL; gu_mutex_t mtx; gu_cond_t cond; causal_act_t act = {&act_id, &act_uuid, &error, &mtx, &cond}; gu_mutex_init (&mtx, NULL); gu_cond_init (&cond, 
NULL); gu_mutex_lock (&mtx); { long ret = core_msg_send_retry (core, &act, sizeof(act), GCS_MSG_CAUSAL); if (ret == sizeof(act)) { gu_cond_wait (&cond, &mtx); if (error == 0) { gtid.set (act_uuid, act_id); } } else { assert (ret < 0); error = ret; } } gu_mutex_unlock (&mtx); gu_mutex_destroy (&mtx); gu_cond_destroy (&cond); return error; } int gcs_core_param_set (gcs_core_t* core, const char* key, const char* value) { if (core->backend.conn) { return gcs_group_param_set(core->group, key, value) && core->backend.param_set(&core->backend, key, value); } else { return 1; } } const char* gcs_core_param_get (gcs_core_t* core, const char* key) { if (core->backend.conn) { return core->backend.param_get (&core->backend, key); } else { return NULL; } } void gcs_core_get_status(gcs_core_t* core, gu::Status& status) { if (gu_mutex_lock(&core->send_lock)) gu_throw_fatal << "could not lock mutex"; if (core->state < CORE_CLOSED) { gcs_group_get_status(&core->group, status); core->backend.status_get(&core->backend, status); } gu_mutex_unlock(&core->send_lock); } void gcs_core_get_protocols(gcs_core_t* core, int& appl, int& repl, int& gcs) { core->group.get_protocols(appl, repl, gcs); } #ifdef GCS_CORE_TESTING gcs_backend_t* gcs_core_get_backend (gcs_core_t* core) { return &core->backend; } void gcs_core_send_lock_step (gcs_core_t* core, bool enable) { gu_lock_step_enable (&core->ls, enable); } long gcs_core_send_step (gcs_core_t* core, long timeout_ms) { return gu_lock_step_cont (&core->ls, timeout_ms); } void gcs_core_set_state_uuid (gcs_core_t* core, const gu_uuid_t* uuid) { core->state_uuid = *uuid; } const gcs_group_t* gcs_core_get_group (const gcs_core_t* core) { return &core->group; } gcs_fifo_lite_t* gcs_core_get_fifo (gcs_core_t* core) { return core->fifo; } #endif /* GCS_CORE_TESTING */ galera-4-26.4.25/gcs/src/gcs_params.hpp000644 000164 177776 00000002136 15107057155 020711 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2010-2014 Codership Oy * * $Id$ */ 
#ifndef _gcs_params_h_ #define _gcs_params_h_ #include "gu_config.hpp" #include "galerautils.h" struct gcs_params { gcs_params(gu::Config& config); static void register_params(gu::Config& config); double fc_resume_factor; double recv_q_soft_limit; double max_throttle; ssize_t recv_q_hard_limit; long fc_base_limit; long max_packet_size; long fc_debug; bool fc_single_primary; bool sync_donor; }; extern const char* const GCS_PARAMS_FC_FACTOR; extern const char* const GCS_PARAMS_FC_LIMIT; extern const char* const GCS_PARAMS_FC_MASTER_SLAVE; extern const char* const GCS_PARAMS_FC_DEBUG; extern const char* const GCS_PARAMS_SYNC_DONOR; extern const char* const GCS_PARAMS_MAX_PKT_SIZE; extern const char* const GCS_PARAMS_RECV_Q_HARD_LIMIT; extern const char* const GCS_PARAMS_RECV_Q_SOFT_LIMIT; extern const char* const GCS_PARAMS_MAX_THROTTLE; #ifdef GCS_SM_DEBUG extern const char* const GCS_PARAMS_SM_DUMP; #endif /* GCS_SM_DEBUG */ #endif /* _gcs_params_h_ */ galera-4-26.4.25/gcs/src/gcs_seqno.hpp000644 000164 177776 00000000644 15107057155 020555 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2008-2012 Codership Oy * * $Id$ */ /* * Operations on seqno. 
*/ #ifndef _gcs_seqno_h_ #define _gcs_seqno_h_ #include "galerautils.h" #include "gcs.hpp" #define gcs_seqno_le(x) ((gcs_seqno_t)gu_le64(x)) #define gcs_seqno_be(x) ((gcs_seqno_t)gu_be64(x)) #define gcs_seqno_htog(x) ((gcs_seqno_t)htog64(x)) #define gcs_seqno_gtoh gcs_seqno_htog #endif /* _gcs_seqno_h_ */ galera-4-26.4.25/gcs/CMakeLists.txt000644 000164 177776 00000000122 15107057155 020023 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2020 Codership Oy # add_subdirectory(src) galera-4-26.4.25/gcs/doc/000755 000164 177776 00000000000 15107057160 016031 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/gcs/doc/GCS_connection_states.txt000644 000164 177776 00000011774 15107057155 023026 0ustar00jenkinsnogroup000000 000000 GCS CONNECTION STATES (from the application viewpoint) Since GCS is a library to be utilized by an application, it has to export some sort of a Group handle to the application. So far this handle was attempted to have a connection-oriented socket semantics. Reasons for that being: 1) It is better to expand on a well understood and established concept rather than invent something. 2) The whole idea of GCS is to avoid exporting Group Communication concepts to application. It is much easier to work with a socket. 3) The main point of the Group is a linearly serialized stream of messages with a Group being a single source/sink of the messages. This effectively makes Group communication a point-to-point connection. Initially this seemed rather plain to me: when we're part of the primary configuration, we can send and receive messages. When not - all calls just return -ENOTCONN. However, there are certain aspects to GC that make its interpretation as a socket not so straightforward. These are configuration changes, primary/non-primary configurations and state snapshot. For the demo these were deemed not essential and were not addressed. As we're moving on this has to be addressed since we have to settle the API the sooner the better. 
Basically it goes this way. Whenever DBMS process joins the primary configuration any other way than by configuration change from the previous primary configuration, it has to take a state snapshot (be it incremental or complete) and only after that it can be considered a part of the quorum. It could be done the following way: 1) Process receives configuration change message and decides whether it needs to take a state snapshot. 2) If "yes" then it sends snapshot request message. One of quorum members is dispatches snapshot to the joiner. 3) When the snapshot is complete, the joiner sends the final join message. (maybe "join" is not a good term here, but I'll use it just for now) 4) When the join message is received, every configuration member puts the process in the quorum member list. Only now the process is a full-fledged member of the Group. Note that I've been speaking of two separate memberships here: "configuration" and "quorum". A process is a member of the configuration as soon as it receives a configuration change message (bear in mind, I'm assuming Spread as a communication backend now), so it can receive and theoretically - send messages. However, it does not have the up-to-date state and in case of DBMS: 1) Cannot really apply messages (write sets in our case). 2) Cannot give a snapshot in case of another configuration change, so it cannot be used in quorum calculation. All this makes the process a sort of the "2nd grade" configuration member until it gets a snapshot. The problem is that every configuration member has to be notified when the snapshot is complete, hence we need this "join" message. As a result, state machine for the GCS connection will get one more state: own JOIN message received +-------------------------+ ______ | V V \ gcs_open() +----------+ +---------------+ | conf. 
-------------->| GCS_OPEN | | GCS_CONNECTED | | change +----------+ +---------------+ | to PRIM ^ | \______/ +-------------------------+ own LEAVE message received, conf. change to NON-PRIM Rough explanation: GCS_OPEN (perhaps should be split into OPEN_PRIM and OPEN_NON_PRIM). Only snapshot request and join messages are allowed. Attempt to send anything else results in -ENOTCONN. Attempt to send join message when in non-primary configuration should result in -ECONNREFUSED. GCS_CONNECTED. Attempt to send snapshot request or join message results in -EISCONN. Application messages are sent alright. When GCS_CONNECTED->GCS_OPEN change happens all pending GCS calls return -ECONNRESET. So GCS API is about to get more complicated. And here we have two alternatives: 1) Implicitly expose GCS connection state to the application through those error codes. Application will have to keep its own track of GCS connection state and not forget to send join message. In this case API can stay the same, but it's usage will get a bit more complicated. 2) Application can provide request_snapshot(), send_snapshot() and receive_snapshot() callbacks to the library. Then all this could be handled by the library and application would not have to know anything about snapshot request or join messages. This won't simplify the application much though: callbacks will have to be able to communicate and synchronize with other threads, since in this case application will have no control on when the send or receive callback is called. This also would mean additional 4 parameters for gcs_open() (3 callbacks + context) and make GCS connection much less of a "socket". galera-4-26.4.25/gcs/doc/Coding_conventions.txt000644 000164 177776 00000005336 15107057155 022435 0ustar00jenkinsnogroup000000 000000 These coding conventions were not set from the very beginning. They emerged as a result of coding. Therefore they have some practical basis for it. 
Not all code adheres to them, but that's what it should be like. Attempt was made to justify at least some of these conventions, but it's not about right or wrong really. It's about consistency. 1. Indentation. Tab width is 4, tabs filled with spaces. No real tabs. There is a controversy about this issue among programmers. In defense of this decision I can say that indentation is some sort of ASCII art and ASCII art does not always work good with tabs, especially when you need to alter it by hand. In other words: spaces are less flexible, but more predictable. 2. Braces position. Opening brace on the same line as the statement, see example below. 3. Spaces between tokens. See example below. 4. Function declarations/definitions. See example below. 5. Naming conventions. All names and identifiers are all lower case with underscores except macros which are all UPPER case. 5.1 File names. All C file names are prefixed with 'gcs' to avoid collisions in the file namespace with headers from other software. Prefix is followed by the module name. If module consists of more than one unit, unit names follow module name. Like gcs_module_unit1.[h|c], gcs_module_unit2.[h|c] and so on. 5.2 Symbol names. All global symbols - exported through header files - are prefixed with 'gcs' (or 'GCS' where appropriate) followed by the module name. This is done again to avoid namespace collisions with the third party software and with global symbols from other modules that may be called similarly. Static symbols defined in the *.c files simply start with the module name. This is done to easily distinguish between global and static symbols and to prevent collisions of static symbols from different modules when doing tags search. Example: int gcs_module_external_var; static int module_static_function (int a, int b) { int c; if (a < b) { c = b - a; } else { c = a - b; } return c; } 6. Long lines. Lines should not exceed 80 characters in length. One more reason to use spaces instead of tabs. 
(Suppose you have tabs of width 4 and then some other person reads the code with tabs width of 8. Many lines will grow more than 80 characters and may not fit in the screen.) 7. Spaces at the end of line. Should be avoided to minimize the diff. 8. NO COMPILER WARNINGS WHATSOEVER. These conventions are not set in stone and can be altered eventually. However, for the sake of consistency, try to follow these conventions unless there is a REAL (and well articulated) need to do otherwise. IF IN DOUBT - SEE HOW SIMILAR THINGS ARE DONE IN RECENT FILES. galera-4-26.4.25/gcs/doc/GCS_Architecture.png000644 000164 177776 00000102406 15107057155 021664 0ustar00jenkinsnogroup000000 000000 ‰PNG  IHDR´ˆäþ[bKGDÿÿÿ ½§“ pHYsaa¨?§itIME×  ºL  IDATxÚìÝwTYð7HoR”ª" ˆ¨X@A°®ì°b]ÁÎÚ×Þu]»®‚{Ãöµ  Rm ( ‚tPé=ßãfó…„žßñxÈdrgîdîÌ3wJXl6›þ%E€p€p€põLppðܹsUUU9Clllºw”Vw– OeK× Ä"¨œ‚‚‚Ç{zz¾}û¶¸¸ØÜÜ|îܹ–––l6{Ô¨QÑÑÑÜã—””HHT5އ……™ššÖä³Ëª£"<µÉ’©ŽÊ–®;Â7xðବ¬íÛ·÷ïß_FFæÝ»w'NdFxóæ²²2÷Güüüª>Ý“'OÖpM«£"<µÉ’©ŽÊ–®;@ÁÂã“*Ñgеk×øøøÐÐPmmmî·Ž?>}út6›Íf³™£a6±ÏŸ?·oß>77·&›­È+R+µ¨\e«ãK¨pÍ@…=z444tñâÅ<É€ˆÆ÷3w³XÜÃçÍ›§¦¦Æ=0((è—_~‘““ÓÕÕݳgϘ999óæÍkܸ±Á‹/ˆhÈ!¹¹¹Lá’’ü»ýbbb†®¬¬,//oiiÉ|0$$ÄÕÕµiÓ¦ÉÉÉcÆŒQPP055õòòb>Rö»ÂTäëׯ'NTSSSPP>|x||¼ 9)] ¾ÆÅÅ;VYYYYYyĈŸ?.wáðÅwÆÊ˜7žÊò¼$¢ØØØ~ýúÉÊÊÚÛÛ”@L° ‚¬­­‰èÝ»weÆÝÄš5kÆÓâÂÃÜœ233W¯^MD§OŸæsûö퉉‰çÏŸ'¢öíÛ—.“/333"úðáÃÍ›7‰ÈØØ˜Ífkjj2ܰaCjjjHHkîÞ½[î»åV$11±Y³f666±±±qqq]ºt4'˜––¦§§7hР´´´/^¨¨¨hjjÆÅÅ•»pxš1áç­ô˾}û^¸p!777((ˆ)ÍÄÖl€ STT$"¦c\ÈpðýûwžÝÌøñã™Cd6›‘‘ADÌ®‹ù›ˆòòòØlv~~>5jÔHÈpбcÇV­Z±Ù좢""’””d³Ùéééœ"ÌhÇŽ#¢¾}û–ûn¹qvv&¢ÀÀ@æåþýûŒŒ͉0.[¶Œé$`^þñÇD4mÚ´rA3&ü¼•~)--ýüùsæïׯ_#Âü·‡ ¢üü|áÃAII Ïn¦ô) 999¾cr¿²ÃïÞ½{666œ‘K—GDÊÊÊå¾[nEtttˆ¨°°P˜9¦@ÎîŸéb!"MMÍr²gL˜y+ýÒÐÐPZZzæÌ™oß¾EC„ø±±1ýóÏ?‡ƒÒ/™‹rrr*ôÁrÃAVVÖ¨Q£ÔÔÔ®]»VÆ ‰HFF¦ïò­HZZšsRn5âyyyÜ÷‡A3V•y»sçNÓ¦M™#FŒà[8€À‰6lØ0æ”|U iܸ1={öL´ó¶téÒ«W¯^¸paĈeŒ–™™ID-[¶¬Ä»<˜g=|ø°rsRš††EEEý¼jZB‚ˆ +º(ÍXUæÍÖÖöŸþ9tèP»ví®_¿Î¹þ$4tIIIMš4Ñ××/}àèååÕ³gOö¿'³‰¨¸¸¸ôK6›=räH"jÕªUhh(›Íö÷÷ïÕ«稈JJJ¸;Ò™–Ûl™ë!˜S¥{83ÌÜŒ°lÙ²rß-·"öööLE"##Ùl¶ŸŸ_ç΢ž—¥ 
œ9s&sÉ!ó2,,ŒˆV­ZUîÂá!hÆÊ˜·r+KD~~~ÌßgΜ‘••EsœV€Ÿ|}}UTTŒ/_¾üíÛ·’’’œœœ'Ož4kÖŒ¹’îåË—Ì~åÕ«WÜ/˜BBBdee9·ÌÉÊÊúûû3%3™ÐÀ\õÆù sËßýû÷™RšžžÓ«qáÂæF;oooÎ.ÐÅÅåÛ·o‘‘‘&&&-Z´à¤2Þ-·"oÞ¼‘““c***ª««_ºt©Œ9á©Eé™; ÔÕÕýüü222ìíí[¶lùíÛ·rA3VƼ ª,ó’YP¾¾¾ùùù×®]cò°9û° IIIÉÊʶhÑb̘1ÌžýÿÏÌ)ý’ñüùsKKKiiéŽ;þý÷ßÂ|p×®] ]ºt‰‰‰á;WgΜQTT477÷î]Ïž=õôô¨R8@2›mûÌÑ~Ø¢’ëw5Ì @ÃjÚ•j}ÕÚôþÛÚ`¯Q#)¡Šù òáÉ™ ¢É)¡^äƒJ†$d¤„ºŸ¨R— T8à~„ú  Î¦ìVª)P¥.A¨X8@‡b@u¤D„:E‹@¼“bÔ¯ˆ€|Pýí<¨@8@·’ÿ Uö®„Jˆ¬ öù .äôˆg,¨ÉýèϼÏÙ£¯[W¥âx>þïKÄä¨1è0eD "çu¢H ÈÈȵ×y l8À9$ƒJ¦„ Ed@>@ψwÛ¶#™ê]8@·º DÖ‘À ù NêôÂø%‘ßb Û ,¨ÞFŠFWÙç|Øû î,„Ùïã´BÃí3`³Ý±èþ¿ÎXPÐèÐâê ,¨Ë˜“ XPx " @Â΄¨tÂ4Dè<@8¡ Û áBç @Eàœ Â ‚káá žÄ"€ÚÅb93°ÙîeŒ–’’9cÆé“'§¨ªÊOštÜÕµ¯¹¹–^¹Â¾¾ºt4&僄D#ËVû´s\wÑžçñYï¾<¿r2:彬”‚Ép ƒþ›®N`ÆÉÊû~áÅŽ·_|Y,‰î&#º ^Ù±ì§oåo¸2nÍèsR’2ÜÃ[p}érR Í›´¶íàÔ¶¹5÷[D„§{Õhqµ‰ÉœˆÀWZZö°a÷ìqPU•'¢;íG:´g£…¶VeyþñÆëÏwí­\uÕZåäg¾ùòì€Ïžqî…žŠHk³DGÕðGNj`Ôƒ]·gqÞ=òpE»æÖz¬(,Îîµñêør'ý·œ”‚ïG¯>m¸‡»ýúêKÚÇ-W'ý9鞢L㌼ôèÇG®œÜ{-ç­#Oµhb‚/®Ö¡ÅN+@=0}ºÇºuC,-õ™—J/Îtv>“…#ðÈ/#îùG¯¹v{ôššJ6’V–Wïn2b´åÿ…ƒ˜Ô°èdzmw5S7–h¤ª¨ÙÏlÂüû8#|J émj/+%¯$«j×qÊο–;]ÿˆÛv¦~I /*)äÎbIè5iCDÊrjTä›ö6µŸÔkõõ×9oé55e±°QB‹„¨·X,çøøï66Û%$\¬­ÿà~ëÆÐ1c܇ýëÂ…îñ¯_qt<2dÈ‹+ÐoìçUPPlkÛ–{ ŽŽŠƒƒÅ_=Æ!Èý7g˜M’”âÈs\þðí¹þ&IüÿþX_£]#‰Ÿ}ŠZö¾êQ\Rļ´n=¬ì‰þÈIý'ù]ûÝ;êõzññf¹3Ù¾¹Mò/ø²ª£ÅÑ¥K[ddæ°XÎ&&kˆÈÒrkPÐ <(è‹•Õ6´8@8QZ¼øòæÍ#Š‹½x±Œ3ðÔ)w÷§»w;xxL¹x1ÀÃÃóÖ“'û÷=qbòñã/îßr*—.Þ¡ôðáÃ;ðl Û»/Ï´;ó l¢¬Ë}Fÿc|€¡&Ÿe{p†?óÇä^kÓ2Ö\õ,ìjQI¡ª‚FÙ¼Œôn¯×Cª‘´™^€Ï÷JJŠËžÉèÔêJ:ø²ª£Åyxø­^íµk—}fæ^6Û=<|Í›÷˾}8ŸÝ»÷á¼y}ÐâáDiÒ$«Þ½Y¬ÿ{”é–-ÞGŽLjÞ\UMMáС Û·ßå¼µ{·ƒ††RÓ¦J{ö8œ<ùBÈ©„…%èë7)=ÜÀ É‡ øù–$'­È=Äù°÷?"ÊÈIU”U)£)I™É½×θ/.-rÓåñŸï•=QÿHï.†ˆˆÅ’hÛ¬ÛËO>¥Ç Žþ›ˆò s>ß;|™]‡Éø²ª£ÅmÞì}æÌôž=¤¥ÿ»¶ÌÁÁüñ㈔”L"JIÉ|ú4ÒÁÁ-JÉPy¶+=ðóç]Ýÿk$%ùÐV­4’“3«8õ’6 OXLNFé[V"÷qù¡__Ť|Øv}ʲ'ô›¶%"Y•¼‚,y岋ÒRi9®û²œ‚̯•`5êlЗïh±©¿e'›6³b^v7±ëö,+£A<—yð{qI‘¤„”–ªþ(ËùÝŒ‡à˪ŽÖ¡C3ž‘¥¥%§Lévøð³•+¹»?›2ÅZJªZ @µÓÑQyñb™®nYÇ£?&êé© Y ©©öçÏ)¥‡GE¥¶m‹iZiu ˆz`ÛÁ‰3D‚%¡¯ÑŽˆ 4Ú3CÚ·è“ÞF·« Bœ[ü1ÑGE¾)ÉK+·Y¶Íkª pàq+'?cöQ+îQ˜¾Î9 ¨Ö§§§þêU´!ïwêÜÓÆfûÂ…ýÎ}z-¸á´ˆ˜«kß1cÜcŠ‹KxÞZ´èRjjVbb†«ëÅ™3{ Y ½½ù¡¥‡{y…8:Z` bkæä|,.-¢Œq†˜ÿêvMlîO?\Ùë=—ó2ðó}Îß2RryY|‹*.)zõùî:û‹î38ÿ68\¹|‚§|¨™·|¹Ý„ Çîß+((⮭ݸ[7ƒI“Ž[[ji)£Åz 
&üö[?99é‰ú”\TT¢­Ý8>~;ó–¥¥þÌ™gŠ‹K.ì×µkKž2:(ý($++YY)ŸwÜ}ª ?®\ ~òd‘¨f›ç9<Œzý4C­CÍÿ¼ù«]Ç)] mÕµr ²>%†p£®¨mi4ðàÝEÃ-\´U 2sÓŸ…_vuÙðãœq®½: !!iÙÊNBBòVÐÑ.†¶¥§UP”w+ðp'ý>|H(((ân‰óçÿbeµíåËßùX3-ê8›]~¨g±XîÈþu‹³«*».çÃe?‘Pô«˹ÒSLOÏ<øÀÎc¬­ ‰(99sôh·Ý»DõHËÙ}fß|ÀW= ŸC¼ñüœš•ÿCVJA¯i›ÖÃyúùCcžzûšþ©‘„¤i3«Ñ– šü»w>l±ÁñêóðëO®‘…AGëÅÒ’²ÂÂAÝD”’’9}ú)©ªªò'ÿí7Q>Ì•³©&àá¾ öá ÆZÂA 8­õCÓ¦J7nÌaþ>sfZ5M¥ìþÄ@‹ƒ$B ©/ÇL‚’ 4\|s€ðW$ 4 |€ˆÈ¼#"ÂòÁÏ¿áxÏ/ "Ô.ÜLjpPó" €ÀЀˆP—öLlìŠ@Ø]Î, "4$¸Uáá@·Â½ïÁ™DQ“Ä"h°X,g,„ŽÄõëŽÌø%h4:@·A]\WÙl¶+4 ßJn6¬zºi°;HžžD¨¹]l½ÝV`7$ªý…0û}œV‹CRœ\¨‡½8Ñ€dPg! "4”],’  "@ýŽH€|€ˆ€ˆ€d€pˆ€$„¨™ÎìQêoP@2@8€jÉ8¿€ˆPO»  Ú»° áÊß#@ðøäêíó¡tD <€Ä"ðI$„¨é|€½"b Àÿå"b"v$ˆT~g±áêPD@J@D€w\þï*ÁyY‹ç8“F,¨Ë𫌠{ÃñoJ¨á£¨­Í¾0û}ô •Ö~:ÁÆ¢ú–- ÅA%àVFq |Ü›~g ú‡á !Š ~Æ.)AD„ "úúéí.ç>)_£x†#"Â@uçä¶Ž}Fj4oÅ÷]D¨(\P¿¥ÅGÜ¿¸ü¤Ù£ñ<ë¦zÄÖ½Ó¶6ï­×Æ<)æãçÐç…ù¹eGô"zÄYfzòó'\¶_q_:æÝ Ÿ&:ú?ÒÇ.Ù×Õn|IM­É‘#—‰híÚêêMÏœñæ.yÍ×£G¯ÄdzÝÜ.¸¸82¹ Ù¿ë³gáDÏž;w¹«ë”OŸÂ9wqqts»Àýñ… §}úþè‘Ï™3Þnnç9erc>W|çÎõË—O‘¿ÿ··øú~TA!Gà S§°ØìòûY,º ¡: SºV\ÚµÐb€£~;Ë{§þôõ:¶þÒ–„Àã澆§^•ýc <±@ì¿Sg VÙOdÎ÷¿2l˜ÍíÛ/MLþ[zææÍ☿åää?Îîڵ僡ÊÊ9ãôïßéàÁ³FFm‚ƒ_­[·ÐËË·ì QéB8oåååÈsf˜ûZæïüü<}}9¾#”.-#㻉‰*ów颸VPÈÑÊ^žØPˆjÕf¿kû…»˜?˜g,ò$ƒ¼œLÏ-.ŸCŸ«iëµï>¸«Ýx澆²¬×"ðÕ¶mÇY³/]êìåå˹³499g×›˜øU^^{ȤIΧN¹mܸ×ÓóÈøñÓyŠe®p乎¡t!²²reϧŒŒ¬ð•RVV){„Ò¬ÊhP+pZ ábž±È3ðöÑÌ} Cg®û‘š°eR—ÔøhãÎ½Š <6L[b«ýçôîKít˜g2QRÌÇ[‡×s"N4ðX°`UNNö™3‡9CÔÔšðŒ£®Þ4--™{ȨQ||®¥¤$=|è=t¨ÏøœóÜ;×Ò…T“’ï-­°°€o+7 @]ñ%,¹¯¡µE‡…»õÛvíj7^EC—óc ;î'ÏÞéåå¶úÙµ#YßSïzl äéEhàé-OLüJDRRR{öœØ¶m¥§ç‘´´"4h”›ÛÎÜÜœˆˆãÆÙÑ A£/_>]\\ñÁÚº)**õí;è×_Çôï?DP@^^®¯ïC"ÊÍÍá[ˆ¤¤dzzjPÐKfNâãc™JII1—úø\“’’"¢ØØh"JJŠ'¢¤¤N¸1Ÿ*,,X´hÏÀŒŒ'N "KK¾r4@8€:Šï} Ù?ÒøüXC ãÎ}^Ü:ÙÚ¢w E… <"˜›7'¢ÎÞÐØ®]§©Sç,Y2³}{/¯\ùÇ«W¾&&*Ӧܰa­X±5 ÀÏÈHiî܉ ®e>åääòê•ïøñ3J—ïævÁÅeìÀ]dddŒÚìØ±–o!ÇíÛ×L]½)3'-¸>Ãbz ˆÈÒRŸˆ:uÒ%¢Nt8U(5QGsóæC‡Úó̉ƒCß>}ì Œÿüó0•® £AÝ ¡6áÌtÝäwËãÕ³ù9Y ÑaF{ÌÞåòøúéM¿î¸ŸÌóHæ‹;]Ã^>(..’Sl½éW"ÊËÉ||ù`ï1³Ÿ\>4nÙ_*ºD¤×ƼµEŸ·¾·™¤~Ú7Ï®ô¹œh@ψ cóÞ‹?aþ~zÅ]I¥©¬‚²BcuS«œq2Òýûƒ„wOýù1ðñ¶©ÝXÄêØgäÀ©¿7’”B/ˆ§äØHÛ)ËØìy%α!Ñï_M^sœI ~7ONY{¢µEŸØ!§7ýZT˜?böæÒ½õ%"p~zဉ+܉('ã›ç—7NX›š‘:¯’F IDAT–xríd‹cµ L‰èÁÙ=:­Úu±GDm­íºÚ~÷JPiu?"ÔÓ. 
g DOM[oâJw"ÊÍúñäò!»ÉË8c~xyŸy‚Bzâ—§WÜ‚ÿ¾†kÐsâI¿åÚ‹ï“¿DHJ˪kë1Ÿ\>ÔX]«SŸQÌËð×¢ÞÏÝ}33=yãøŽí¬Š‹‹În›=iÕ³Cê]/B=ê6ÀB„¨,KS¯5÷äØO¶S–±$~vUÞ9¹ÍràD Ý࿯äÏÙsSB¢ÑûwÜ—Ùx-’y 3"BõE,7(N+@µsZ}ÔfØ4æïØÁá¯ÚN^JD†fÝdäÎlv.*ÈokmçzðßdÀp¢áÄM\ä›®vã5õZçeg(«k-9ú쟷þL³Îü–bÐÞJ˜*×a€…PGu2yÚ†Óù9YÛ§wÏHKÔÔkýû©W’RÒ^‡VU¨D@8±"-§ÐҴ˶)VÁ®æeg4’”bQeöXˆH€pb‚Åb9­96zÁŸ÷=w­ÕZ¢‘ä™k+]"‚h%D}ØêÔu–¥ÔüžJõ:”`5¨| e³ÙÂ4c\Ý Õ×t±vAõí°v ÚG–±dþ˜Ú­×˜Y]íÆK4’¬•Å—^öòf¿[@¬à¦Çª=ýüÎj°Ö¥† §@<#N4T:Läçdq–XQA¾ç—½/èÕØs‹KQA>3NàƒK«GͶúù«Zì’’nk–Øjϵ–;¸pxvF:O.a³ÙÞÇ61#ì™ÝVWIÎäüny¬wh7ÛJz½C»è÷üª½z¤Ñ\k¹ÙVÒ›Æwúúœ¸g΀ß~Q›m%½Ëå—×wÏ1SäÌ<÷ ªß©G½õß:Ùr¶•ôâþ'ÖNn€«N+@íÓ`킚húª‚Üì cÍæZË_ãd`Ö¾gv?×Þ*•«#6"dëä®smä/ïY<}óÙø½ã‚D¨M¸ êÔªÈÐâªONæ÷µ£Mþ¼—ˆ¯VÖaöûè9àÝ_¢¡:<½âV˜Ÿ›Ÿ›}ûèÆx‰_ý‚pð_DÀ‰†êãï}Ƶê’šé 1£]ÿÄ©Ëp·ÀÿuàŽ†j²ô˜o L¿­€pP½YA„¡îtBÔ…9ÁþለÍv¯Ýê°XÎug6 .Ã5åO‹Íµl¶{­'@8«^\®X/¾&,„D„D„D¾„QîéëTDØ´É[FfÎùó¯k¦úsæœSP˜ôk ÔÑ^„U«­_?´wïÖDÔ½ûv‘̤½½ûË—ÿð}믿ÆíÜiß®ÏpQMê‘î‹6"DD$ii)‘¯ïÒªÏ[rræ§O)z‚FøçŸTiiއ߈dÒ€p€^„ÊG„§O#-,¶¨¨¸Þ¸*##£¬¼€Årþð!ˆ.¼$)9‹ó¡ÀÀYÙ9ïßÇ{xø©ªþvê”?§œ«Wƒ Wš˜¬9uÊ_Ccñúõ·tu—†„ÄJJÎ"¢ïßs&L8&/?wРýÌøyÙÙùl6{Õ*¯Æ<{É=é+®+*ÎÏÉ)(**qu½¨¤4ßÏ/Šˆ’’2ìíÝ•”æ³XÎÌ¿øøïµ¾ð±"Ôˆ Œóç_Ï{ÎÍm§O›V¯¾Ñ½{+ss½ÆuèÐÌÔT›ˆÜÜž$%íà<ÈÈÜ\ïøñÉ7o¾QWWxòdÑŽ÷˜áGú®Zåuîܯϟ/;zÔwРvk×™3§ÏÎöl¶{AAÑÀûûö5IIÙ•Â|ÄÇç™Y³?ÿ¼×³§ÑÌ™=ed¤¸'½eË)ÒÒ’..gììÚöíkÒºµ&9:16ÖüúuûÛ·kee¥Ølw|õ šˆž¸|ùµ;wæ[Xè5i¢Èf³{õ2&¢+W‚&L°dÆ<¸ý³g‘ÜŸºyóMZZö!f††M¿|I'¢ÈÈäõëo=xàÚµkKuu…¬¬¼¡C͈èîÝ÷}ûšÑÞ½,-õ§M³QP ßÀ)çÇÜnÝ  0 ˆŽ‰Iã™ôÓ§‘³gŸ6ͦsçwî¼/**!¢€€˜±c»HH°.^ èÔ©9¾n„¨@D(wœõëo­^=˜9òöõý”߬™jVVþƒaãÆuaÆ9vlò¶mwb˜—EE%÷ïøí·¾DôüùçnÝ ˆhÛ¶;¿ÿnÇ”“””‘<`€é—/éiiÙffºDäáá7}º ÷¤‹‹K||Þéé©õèaôý{S÷¤‹ŠJÎ{íà`nmm×¶­Ž††mÜ8ÌÒr«¦æâ7oâ.]ª¿˜€[jtÿ]­îÞ}?zt'"JOÏÞ¹ó>Ómpûö[ ½fÍT™q”•eOžœ2cÆ)楯ï§š39`×®sæôf>2vìÏ0±lÙUC%%Ù;wÞ÷éÓšÅbQDD’±±&EG§—0åtîÜÂÁÁ‚ˆ||Þ3Säž´¯ï'KKý~ýÚÑ›7qœ«UT䟙¹ïúõÙºº8¡€p"URÂþü9%,,aåÊ뺺*;6¿t)ðúõÛyz¾$"ËÙß?*/¯0&&ùÈÍ›¡-Z¨egçoØpKAAzÈ3"ÊÌÌ‹ŒLŽý¶víÍüü¢Þ½}|Þ=xÖ¯ŸÉÍ›oˆHCCÉß?êÆP‡Ã……ÅDtóæ›!CÚ3eÞ¾ý¶_¿6çνæžôÍ›¡‹õgF Œ16ÖTR’%¢õë‡ |`ûö»·nÍ•••bÂÁ A?ÃÁljaa &&ZÜ“¾yó••>3Bddr``Œ‰‰VXXÂÌ™=‚ƒ¿tî¼ÙÂbËݻﱉ ›]~O‹ÅÂÍ!P}›c¬]PGÖÆj]-X¢ý¹äˆˆ$ÏGÖÖëØqãÓ§K”•eÙlöíÛoœN¤§ïjÇÃrF«¯Åõ\˜ý>zêŸââ’£G}Gî\‹óæéù2++ÿû÷ÜÈÈä6m´ñ½ˆ I,€zÇÒr[`` 
5j$áâÒ³VæÁÃcêêÕ^ \PV–µµm{éÒL|/PkVÔú< ÞaøðumÉT÷é¡§ˆpí  ”OÖ@81„ÓC" jç§– NÑâJ~N+TÆ»çÞkFµæ¼üsFª—)’B*1󵫯j ®©¹pÀf³±¸¡¾+ÌÏ]=ʸ0?W$¥yn•ô%‚órÉÑgUß„U¢ÊUùÌÉMm5ÕÚÙ‚Åý¯þÙ×À^s@ÏÀ‚ÿ¾&§ ìëuL$¥m¼Q•Ôb•7]‹¬•ù¬¾Z»½*^q:€ˆVž <ôªk;€ €(Ì+T  óê;ÿÛ§ì¦þþ%™(*È?µaúòAÍC_ç[;AË¡µ®P•ËÀ3“ÏîaN@_=‘áäº)̼e}O%¢§WÝW m9×Fþäº)…ù¹|+^ïƒáàÿ¼ô>Ó¾Ç)iY³Cî]()þ¯çÙcô„¨oŸÝrýëþÊ3÷ÏìŒzë_ÆðÒ<ÖOKŒgþýèÎÉmóöyo¹mj5àÈïŽDtd¹£ã¢={Ÿü0iÉ…?çÑæQDäÀ^~Ò¿t!ÿ¼{yïÌ—?¯î}òcÐôU'Ö8}ð¿GD§7ýšî}ls÷3v>LµuZzvÛìJT™ñ%¹âüè*3ðôæ™3ÿ¸´ß7»ÿ¤Å¥çÓcÝÔÄèp_¯cÃfmlÑÆüæáu|k'h9T¢Öªrxf²ã\‡…»u ÚNYïÁŒ0yÍq"úíàE•&÷.Þ¿¸øÈÓíwâUšê\ØéÊ·âP7áv „aù{Ÿî2`,±$$Úv³å>ÀÝtýÙN^&¯¬ªÕÒdÄœ-—w/*cxi[nDqþ¾ud½Ã¢=ªÍ7Ѷ>­° ˆIJv°‘’‘ë1jfvFz¹…øœØ:né¦Í ¥däºÚê¼þöÑD´Ù+ŠˆFÎÛ¦gj!)%m=l*gÏZ¡*3z;Ì‘UPÖ1l7q…Ûý3;™Ò2r™éÉDÔÌȬôFvë­"2µì¯ÒTgöÎë«Ï†ªßåP‰ZW¨Êeà™I‰F’}Ç»Ê)6þÈŒID&]ûу³»'­>ª¦ÕB^I¥ÿÄE/½Ïl¹]ºâõ: á/8¨X8ÀePOÅ~ þ–gj5€yÙ}ÄŒgw³KJ~îeå¹G6êÜóKxPÃK“’‘ãüýþu3#3æï&ºÌu‹k/¾‹~ÿꯅÖ Ð4“Ü…D=ÑhÞŠó²‹íؘD$)%MD,ÖÏf(%-[RR\‰*ó0hß-6âçÞnÖÎëw=þØ9³wØËû¥Çd.VÐhaÄ=oíø.‡JÔZø*—ïLÚMýÝûøæïž»9Ãã"߬fÀœwXØ·IA^3<G@eÅ5T¸çùê#¿[9ßf[Is6÷q¡.òYRRZJFVøá¥Invϰˆ '‡–Œ2ïg¿ãA²03\X_Àuÿacu-i9…ê«rqQ!g'Ýܸã²~C×]Þ»äùãÂL‹íø-‡ê®ui¹Y?V44“f=†¤%DÇG½Ïþ‘òøúÞ§?g˜]R|àE®{›ó¯Š×ŸÔ;xB"ˆ¹â¢ÂWwÏ­»ø^ÛÀ”30)æãáåý9‡¤y9LWALx ™5gLAÃÑ1lþê¡y?{îë§-<ôPSOØçé·³Œ‹mÕ±;ó2-!¦µE‘W™ãSˆokóÞÜCŒÍ{ÏÛs{óDs›aÓÊßÚñ]ÕZk¾‚]1hß­Œ¯Ànò²;'·iéµ6ïg/+¯Ä ÔÖ7ýâÛ¦k?æeÀ½ ÅòP§çNWJ…º ¨×0è?€z¡ /ÇëàªN}F©ë´ä®®£/#§xûèÆ¼œLfˆ×¡ÕyÙi 1wþf7ygLAÿ%ÅÑ÷ä¯D”žø…ówÇy×ÿZ‘ü%²¤¸èÙµ#;gö&¢œŒôÄèð¬ï©Ì%ñù9YD$#§û18øÑUfîBO_ueï’˜ÅE…ß’ã.í^dë´„Ù_Ñ÷”xf~¤&Ñ·ä¸JTY^Iåþ™yÙñŸß]Ý¿l¨ó:Îv$öc0ÅG½gܹ瓩uQA>wÉ|kÇw9T¢ÖÂT™Ù.1³óñç\e~rùÐÙm³™„Áw&‰È¼¿Côû×Îïëí0‡³Yûeìü³[gÅE¾),ÈûûÂþˆ '|+^¿ö Ð`“›Í®h2 "V%>óó“,¤NÍf«úV¤3{Gü{oý¡—…~v•m™dHDrŠ÷<þîlÁ·ì¯«û–*ª69w+s3{|‡sopÝØÜѽSÞ=µ½0?·m7;ÇÅ{U4tžÛ{ý¯Í[ ŸµéòžÅz=»÷±M>'¶ê´uÞ~YM«O!.y\•ÿ¶éȹ[ÛYä™hé—ªr|Ôû³[gýóþU³VfŽKö´·âÔËiÍ1ïc›‹‹ §®÷hmч{>¢GDªšÍ·Ýþ™"ßÚñ]DTÑZ Så²÷[nü£®ÓRÐL‘ω­_?r=xŸûHúç®»Ûós³:ý2züòƒó{(–®¸x„ñÛŒ£G„'Tr_éO"@ÝUÜš`+ÓìkÛÛ~v‡^ÃÅõëæ®§Å ŠY€dÀ¨Ò5l6ù꯴øèä/‘f=†Šqùn¢±ÝF2¨ÆpÀÉXÛ þbnëÏHKTV×f8ˆ’â¢k­è?qKO|«ýbÃÜqŸ?ªb2 ‘<‰ý/\¨õÑâšD´ÄV[Èá 6O4_b«­c`ÚË~6o ¾ÇÎ…‡•»ü°4–HJù¿YÈ5Däûñj ’X ÐâDXGlRªgÚþÛqˆt €`8=„p 2èŽ@8„@8ÎU €µ#€p N‰U8èÑ£¾ 
¨EèŽø/cÕJ{ÀcwÆÇÊ€‡ýKR =Xîÿyòä –;ÔÍ£je8 éÒ¥‹´´tË–-===¹ßòöö633“‘‘éÚµkXX˜³³3Ó­¬¬˜|||Ú´iÃÿÎ;:t––655½rå ÷8...ÊÊÊúúúOŸ>eÞòòòÒÕÕUPPprrBóhÈpNAdËQ$Œ}}}srr8 ¦¦ÆþðáÃ=zÄÆÆÆÇǯ\¹²}ûöÌ7ÇýY---Îÿ¾}û~úô)''ÇÓÓS^^þîÝ»l6[WW—ˆ–.]úúõëüüüƒ¶mÛ–ùˆ††ÆóçÏÙlvhh¨k5@´+!@×F´8†È.Üh׮ݻw°°PZZšSl¯^½vïÞݹsg"ŠŠŠ²³³‹ˆˆà¹`$77W^^ž2bĈmÛ¶™˜˜0oíØ±ÃËËëÙ³g222%%%Lß@^^ž¢¢bQQµlÙrÏž=#FŒ@Ô«§¹HúPGÖF±_qA"»E¸_½zµiÓ¦çÏŸ§§§sŠ•——ÏÈÈ””,ûËã QUUMIIáŒgdd”››[úSœ—!!!³fÍ’‘‘Y¹reÿþýñ¥bSP"»æàÉ“'£F²··ONNæ9m‘‘‘!|9ùùùL`hii)((”ý‘Ž;úùù­[·nÉ’%ÇÇ— P'ÂÁ´iÓ>|8iÒ¤FqoÛ¶íÇ…/ÇÒÒ’¹t€Ó§Oa>Ø»wïÛ·o¯X±_*@éééááá©©©S¦L!¢¬¬,fø¼yóV¬XYTTtäȑ޽{‘‚‚BppðÕ«W™—_¾|!¢¯_¿ѪU«–,YPXX·hÑ¢%K–0)ˆâãã™bˆ(..ŽˆX,Vpp0½ÿ¾Ün¾p¯ÀDueãž={äååÍÌÌnܸall¼dÉÎ[Û·ooÒ¤‰‚‚ÂèÑ£ãââØlöÆååå»téÃ}rˆÿâÅ‹ÆÆÆRRR:tðööæ¾È–3÷K":vì˜AóæÍ=z„«Lqí4îV@‹ƒ:q·@¥Õ°BYq·€ˆO+€øÀñ´xÀµ#€p5ÝÑ †pí Â@ @w´xÀµ#P Käa©  &‰|W.)òXà€®9–³Ö¨Ck£Ø¯Š ¡Å5„ïQÐþW„AR$3ÄÀ&jWÃÜ1µæì‘«žªX,@ FUÿ‰ÑÊ_ˆdP7SB¯ÿ«d8@2q=ê‚z¹®ª˜* êE>¨\D¨p8@2ä}€ú’*×…P± ‘  :bÔ‘•Û7Fz @$1¶oT}#Ó—IE;„ H ~[@ÌU´óájar @Eá&‘ä¡ÂÎ)@ÃŒ­ z[a¨`8@· +@C€îh€ „lˆÇÂò²„@„@J„@,€†×Ž  ­ˆž$b2æ»hP¦õ· h}Pl¶;BÍ`±œ±Äx¯/ä¾§þ¶8f®§óÖ‡p8¨ ô.ˆ®9„@8€†·2"ˆ ÎU €`èŽ@81„ÓC€pBµ#"ƒsÕÀ~x  Ah-öÿ«ŒP£âã±Ä,€ÊÃièþA8„Ápá€Ƞ;á@ì°XÎ¥¦¤d~ðÛ·"š4éx`` ÝÑU„¦‡pØ+;×ÍKKË6ìàŠUUå‰hçNû .`#hzhz€p ÕôéëÖ ±´Ôg^jh(]¼8ÓÙùLvv¾Ø×]G‡Åüa™zwïÞZ$EÑë'š^ Àµ#PU…妮¾PFfN×®[ybþ¥K[ddæ°XÎ&&kxŽ]X,g!b""’hj.nÔÈ…§¨7BÇŒq:ô¯ ¸Ë¿v-ØÊj›ŒÌœæÍ—_¿Â]ÚÁƒÜgoï~õj0Ï„üü¢ ŠmmÛþÿ.SÅÁÁ⯿‹ý·GDÁÁ_EXæ’%3£¢"ª’W8_¿þ -N˜¦'¨ÝU¢é±XÎññßml¶KH¸X[ÿÁýߦÇwê––[ƒ‚¾p­f_¬¬¶¡é!€8Û³çá Aí>}Ú”œ¼£ÿ6S§žä¼åáá·zµ×®]ö™™{Ùl÷ð𠜷ØlwæærÙÙíëÓ§õÛ·k quꔿ»ûÓÝ»<<¦\¼àááÇùÈ™3/ÿãÇžU«-Xp3ü?î^½|üød7·‰/^|æ™Ð¥KÇw(=ÇwàÙŠ%--]"ÒÔÔa™þþŸ+ýÙߣ‰U´é•Ñî*ÑôˆhñâË›7(.>ôâÅ2Î@AMïÔçÍûeß¾GœÏîÝûpÞ¼>u¹éáÚ„¨ª7æÌ˜Ñ]UU¾qc¹5k†|øÀykófï3g¦÷ìi$-]Õglçæddä²þ¿ÍnÙâ}äȤæÍUÕÔš°}û]Î[W®¸tîÜBVVjÊëØØoœáýõ÷ã´µ««+ìØ1†gBaa úúMJÏ€Aîª5L>>×ôô¤utXzzÒ>>׈¨¤¤dÓ¦¥&&*;j{zñö¾Ú¢…÷D$--Sº(¾c–.pÆŒÑLçÁÛ=òéÙ³ ™aÞŽþ4th7==î2kRMvG jz"lwŒI“¬z÷6fýÛÔôøNÝÁÁüñ㈔”L"JIÉ|ú4ÒÁÁMO’lJ IDATìá·´¤¤Œ={>~úí[NIÉÇèè´š‰d*·nÍݺÕçС'ÉÉ™ÖÖ†›7èÔ©9}þœ¢«ûßÑŒ¤$Ÿ¨*##Éfÿ7W 
?ŒŒ4*:%%lVƒ?–pqqts»0pàHŸk..Ž11û÷oUSkôõæÍK»wo(**:zôÊ€Ã8#*jÍW¾còè北Ãb~*ÂÌL355YÐ̨ªª'''>zäsæŒ÷‹Ëžºx7=¶;ÆÀíJÔôøN]ZZrÊ”n‡?[¹r»û³)S¬¥¤¡é!€8³µÝ»bÅ@{{smíÆjj ²²s8oéé©¿zmcc(è³l6›%\»ïСÙùó¿Qvvþ¥KC†øúõ"ÒÑQyñb™®®Šð3¬§§™lb¢Å÷]SSíÏŸSJŠJmÛV§×………Ž$¢G‘§ç‘Båå§8:NáÚüA€€/|ÇT ½zm` /hf^¾üG__nÚ´yÂL]¼›^¹í®BMOAMOÐÔ{ÚØl_¸°ÿñãϹOO é‰1œVhÐbbÒµ´·o¯›’’É}Á-_n7a±û÷à ŠJ°ysÕG>VhZl6;'§ ;»@AAšâêÚwÌ÷ÀÀ˜ââ! ™5«×o¿]LMÍJJÊX¸ðÏ»ööæ7n„–þ”—Wˆ££¾n‰‰_åå8/9]ýåÞï hLž¹ÉÊÊ•Q ŒŒl]X 5y®ZPÓ+»ÝU®é•&¨é šº¶vãnÝ &M:nmm¨¥¥\º@4=ô€X9yrГӉøøïmÚh/Y2àܹל·¦O·‘‘‘\ºôʇ EÚÚãã·sµù93fœ ú¢©©œ˜øgÙSiÙrELL‹ÅRS“ïÓ§õíÛó˜á¿ýÖONNzâÄãŸ>%•ðL‚¯… ûIKKNœx¼qc¹qãºìÞý€û]++YY)ŸwÜ]© ?®\ ~òdQü~ýýŸ®[·ðÎþW„©«7MKKæ\ÀÈéê§ò~ïXИ<B%š^Ùí®¢MOAM¯Œ©ÏŸÿ‹•Õ¶—/ç[`jz¸•áªjøðÜ×OœhÉýîĉ–œ‹¥K_³}ì˜ÓàÁ7–³¶6$¢ääL‡ÃnndÄþÛLHˆcþ×ÖnV\\þváÂéNN.Ì»RRR>>טÓüRRRD4hÐèË—O»¸,þüùã”)ØÑòòr^QnnŽœœ|bâW¦K€¹‚Ϙ¥ |ñⓤ¤dzzjtôgfD«£Ó¼ôÌÄÆFQRR¼¦¦NRR§âÚmPvÓ+£ÝU´é•qSߦWÆÔ--õ˾E¢!7=±„Ó VÔÔnܘ½mÛæ® ^Ú³ÇÁÂB¯vçÊÙ‚UûsóæÌÿ::¬æÍ%û÷ïôõk̘1“˜wÝÜ.¸¸8êè°˜ã~"Z±bk@€Ÿ‘‘Òܹ.\ëævÁÅeìÀ]dddŒÚìØ±–ˆ:wnÆùŸƒï˜¥ $¢áÃÇöík¦®Þ”)Á¢ß™ažœÓ©“.uê¤Ã©‹¸~Shzâ”óÄ‹ûRpþc°X襩˜-XÂßô\]«‘€G²ÔúŒUGM+Ñx¶Ve”àlñóò~¨" o¸JïTø–PZ\lz·>g ì³ÊÝe”¿|pZD@,·D"äÀæÞñpþÆ&¬îoFëø×„¦Õá –÷=H øš îÚ»÷áæÍ>]»¶¼uk.–F¹»Š:^&ˆ|©ŠªÀM›¼7n¼íá1eìØ.hP7á‚DøÉÝýééÓ/ß¿_žX»sboïþòå?BŽÐ½ûv1Yþl˜Öý/H$ßѪUƒÖ¯Ú»wëºßëccD;Bψ›ÍˆY½úƽ{ š6UúôiS-ÎLrræ§O)e\äÌ3‚¯ïÒz±étЉ YÝù¦ø~GÕôED$ñ}šPju¿1z õêµ³k×­))™C†ü%#3çåËz÷ÞÉù-–«Wƒ Wš˜¬9uÊ_CcqI ;))ÃÞÞ]Ii>óÓ±,–³£ã%¥ùiiÙ¾¾Ÿ´´–\»Ìf³—/¿ª¬¼àÉ“"JHø1fŒ»¼üÜ Ž1Å®Xq]Qq~NNAQQ‰«ëE%¥ù~~Q6ÜÒÕ]+)9‹ïÔׯÿo„ÀÀeå,–3óË.99óçŸWUýMWwÙƒa‚&Q+KXÐ^§^'ƒ‡½»wo-6­ ô%"ÿ‚ž>´°Ø¢¢âzãF¨ŒŒ$ßvQ+íQÌ#NÒ¡çDµÍZüÇwüÈݲeĉ/âã¿köèaDDGúîÚõàܹ_ ›ŽyhРv,GÇ#66†ÇŽMþò%½K—-¹¹ˆèôiÿ¼¼ÂG—-³}öìSZZ¶…EË_-~ûökçÎ-†?¸t©íÞ½ŽFF«˜‰nÙ2Âܼ…´´¤‹Ë™1cÌ££S[·ÖìÖÍ ==§E µ… ûñúÚµC¾}ûo„ÆíÚußÔT;?¿hȱ±Û^¼ø¹eKõ¾}w/]: 33¯t»¨•öØ`# @9îÝû°rå@"ºsçýÆÃFŽìDD‘‘Éë×ßzùr¹ŽŽ eeå jFD1û÷•`]¼Àü¾"ݼù¦¸¸dÉ[)©FÓ§{(+ËŽÓùС'­ZiìÙópáÂ~cÆt¾q#ÔÊÊ€û@êîÝÓ¦Ù´j¥qçÎû¢¢"º{÷=ó+M‚¦Îˆ®\ š0Á’ˆ6n¼=yr·É“»Ñ€¦‰‰?ʘbH¼zݲ¥¬ØT§Z¿ððÄå˯½x±”Y™Ùlv¯^Æ‚ÚE­´Ç†Ö¡\8­”“SôÅÚÚ°°°8<<‘ó›ÈÛ¶Ýùýw;fs””‘<`€)mÜ8ÌÒr«¦æâ7oâ.]r&¢ÂÂâ‡í¬ ää¤ÂÃïÞý`kÛöû÷_ßOƒ·¿p! 
}{Ý;ïÏ›w~ß¾±LáEE%çνvp0·¶6 kÛVGCCéË—ô´´l33]ASç!++ÿÁƒ°q㺑—Wˆ““SrVV~óæj‚&Q[;ž:ž JJJ6mZjb¢Ò±£¶§çoï«-ZHéè°ôô¤}|®ñGZZ†ˆ¢£? ØEOO†3åççÍŸïd` ߯_ÇèèÏhbë×ßZ½z0³2ûú~ÊÎÎoÖL•o»¨•öØ # €°ž<‰èÒEOVVêÙ³HKK}ίÁÞ¾ý–s«Õ²eWml •”d‰HEEþàÁñ™™û®_ŸÍüêëÓ§‘;·`~IÙË+dîÜ>Ëùø¼ïÒE¯E µˆˆ¤öFD$ùú.m×îç¯òøú~²´Ôïׯ ½yÇ\ÓtçÎû>}Z33Àwê<#XXè5k¦Êf³38³}ú´ÿøñ]MøÚ¿«šZ“  ¯¿ÿ¾uÿþ­kÖ¸=z%>žÍ<ç˜ï8ÌÀn_¸pßÍí ÖˆnÝzke¥Ïž™™™ûmíÚ›ùùE½{ûø¼#¢E‹.}ÿžËé-$¢[·ÞŒÑ‘ùÛÏ/jùr;f ­m[OÏ—ºº*ׯÏÚ±cÌÍ›¡«W{ýÛíºhQæïÀÀccÍsç^?xÖ¯ŸÉÍ›oM{„ë×Clçéù’Åb©ªÊߺõ&7·ðâÅ€óç_/^Ü_Ð$ðuóåéydâDgyyGÇ)þþQ_ FDŽ,,,ä;3pÆŒÊÊ*Ü£Ñõëç]]§´l);qâ ÿ§X¼%%ìÏŸSÂÂV®¼®««Ò±cóK—ù¶‹ZibÖqˆƣGííÍ™ÍG÷î­8Ã]]ûöí»ÛÕõÂìÙ½ Š^½Š–`……%ÌœÙ#8øKçΛ-,¶Ü½ûž9n°³kKDññß³³ó%%%˜£€€SS‡ÿòË.]Ýe.tëføo8xÃÙðEF&Ƙ˜h¥¤d>}Éñ:÷QQ)1††M‰èàÁñ..žšš‹ïß»ys®¬¬” Iàëæ+1ñ«¼¼ç¥Ï5==i÷/2óŒS†ä䄸x6óïóçl,Þ•+õì¹cÍš[·ŽLNÎ|ü8¢E 5¾í¢VÚ##”†^ª÷ªø30QQ©«V]?{v†ãwì¸ñéÓ%Êʲl6ûöí·NN'ÒÓw7 S©^ªÐ·Y+?¼Ô©“Î;šš?;·õô¤ÝÜ. 8’ˆtt~ÎÏ8ÜoñüÝ¡ƒVhh-ßÈP¹^òw}ªï ЫÞúðÃKB¬Àå/ô4\mÚ¬ýþ=gãÆÛ›7þSÑÑižž/³²ò¿ÏŒLnÓFKR 4úòåÓÅÅŬ­­æååúú>$¢ÜÜœÒã$$Äs7cjj2ÅÇÇþ[Ú(7·¹¹9Ƴ­/ ¡v÷(hP§ 4\ªªò]»nur²Ò×o"ü§<<¦:ôDMí7#£U1—.ÍÄ’+Vl ð32Rš;wâÂ…kÝÜ.¸¸Œ8°‹ŒŒ¬‘Q›;Ö–Çܼ9uì¨MDffšDdaÑ‚)måÊ?^½ò51Q™6mä† {°xÑ¡ÞÁi…z¯Nýº¼ø71=­ ~*qZ-N%ªž÷n^º|rÏ„µ§-,,…•œÜ_ÿh\­¦mçñ6½pª˜J–YXxÔo.„¨^ïv~ÞO£¸¬“udË‚®'HÉ@EûÌqzת©½jŒjk·qÆœ¬ !ÄußCß¼_?3-ù×ÙŸ}Ñ¡üÔ·Š Ÿ¶õòÄnÎÉHëuëÂñý«g|2k«……åÉ?_9½oúoWÕë0ù ·¯·\*_ÙI_%ï^÷Û³üëSWUtt <±û—™ŸŽ˜¿»w×É=\ÇE¿=rV£¶=œ=œÜùó‘- Zöø¨E×\j5:`óá_æg¥§ßXû½bæžP!D§º×}ÙJnÓ©Ïçã¢ßýCõz¯XYË[÷bÌO½–ßЀI&‘¯{¸-¥‹m¹Š‰"\j¨¦¬ðϽwãâÜ!Þ“7üãÙ°¥"*äê×oÕTÿ¯¼œl+¹4Z&M±–—Q(ò¤ÇÚóK³U­^[}¢µ­êqxзÚM¤ÇU\k~¿+X£žö2S“íÊWÒ·"!§ªº¿¤zÚ¢ÛûVLÓXŠB£Ú–VÖJ…˜y  ¬åúšâ™ëøÍŽëáAþÖͼsùœE¨—£sM Ô!_tÖ§ûÐ)×ϹàÉ5‚ŽïÈ׆LUþî9`ü¥ÈKÍÚ]<ºã?»»…¥g£VBˆš½eB¥"o™oƪ‹JÕŸtúÔÛò9¿Ôm2Ò’ ¼Þ¸Ý›÷n]20CNvVvV†êi‡jr[û’ÕÖºÖ18àÔŠ‰ï6½Ïü£±FSkš‘š4íÚúêÓ¤}ÏG1áÑaAiI„­{.ø†.Áo…ª‘&JLòxÎIJ»Aò;l pC"ù¥E·A®›|ÅÀ<Ξ B/ŸU=½xx»á2ó;¿Â¥VÃ[þÇ ÌÐóÓoÎî^£ÑuOÿ±rñ¨nÒcÏF­Ô×âQ̽º^¯ýK«¼ÜœO ‘Ÿê\ÇMß~ÝOþkãÜS¿¯xµï¨2vå ¼¡K,õL 1£„9$¥R™ßd öiòJ…ZMÛöò™ñã§m˜}W©P¤%'\9½O}ž×>³uΈ¨«9Ù™'¶/ 8%“„㢥y’âc„‰±Q:çO|)„ÈÍÎR/6áA„âqì}!Ä«ýFïùyjlDˆ"/÷Ìî5 †wÒ¨§ƒ³G«–ï|%/7çqìý?WÏØ¿æ»SWI3¼ùÉÿþX<ñÞ‹y¹9‰±Q;Žï6h¢BZ´F=5ž†_÷{æB…^>{ôןî\ñMMzTƾœG}¯¶½?nÑõÕ 
GýéïMó²2R_~í½þ_-·±µWe_¡W驯ücÚ—BTrrŸ{ Bc½Tÿrxóož—“•Ѱu÷~W¬êª]Ï+§÷\7ë~è5K+ëÞ]ßûb^OÕ«—ŽîÜ»üñÑwk6xgÔœFmzh/EßScæYuQyÓïÈ/3?M}ïÕ¥_¿ ‹g hÞwüÂ¥ÿ~g”öüÏ\ÇcÛïùyjU÷—z˜ùû¢ M;õ~oÌ·žÎ!¥hš±HJ9xð`½zõRSS‡^®\¹êÕ«8pàÑ£Gýû÷···¯Q£Æ‰'T3¯\¹ÒÃÃÃÖÖvðàÁéééJ¥rÏž=...vvv”¶«ö¥Rèååemmíáá±eËU)))=zôprrRŸ¨sAúJá(X8Mvvv×®]SÉÍÍMììì”J¥‡‡ÇãÇ Ì“žž®^‡f͚ݸqC©TúùùµmÛV©TªÏ‘‘¡qF×x‘‘aiiY°p ý833Sã¿´WÐ@e’““ééæ\\\„³fÍ ÈÌÌ\°`‹‹Ëĉ/]º”••µfÍšºuëJsþöÛo¯½öÚ½{÷§L™2|øp¥RYµjÕsçÎ)•Ê+W®H;ö¥RY§N³gϦ§§/[¶¬råʪ0þü„„„»wïvîÜYakk«oAúJá(p8P*•Ó§OoÓ¦B¡P½¤}b¶²²ÊÉÉQŸ¢=zV¬X1fÌ¥R9lذõë×y/_¾ü®]»4ÞÁ>h·Få¥ÅiŒIøw˜E8мRÆTõ“ììl éq«V­Â¤Çñññvvv …ÂÃÃc÷îÝêjOQ*• 6T¨ZÜ;ï¼£*0,,¬uëÖ¤¯d€Â„ƒììì¦M›®\¹Rõ’“““ÆÌÎÎÎ÷ïßWŸ¢=z’““ÝÝÝ,ͦ=Eâçç׫W¯Ê•+« tuu½wïžôøêÕ«...d d€|‰ŒŒBDEEIOV¯^«T*GŒ1þü´´´   ®]»*•ÊQ£FÍ;7777((¨V­ZÚóDEE !"""T‹ðññi×®jàS}†»wï !¤´-„ˆŒŒ”úTHHH~;••U\\Üùóç¥•Š‰‰Q*•>T-NZ´t•hT^ éééGB¤¥¥©ƒB¡˜2eŠÂÙÙ™p@8ÐýT.—K·Øh;qâDÓ¦M×­[§oÊÉ“']]]7oÞœ››«*påÊ•=zôOJJjݺõþýûŸ¹ Ëá(ÀÞ«¾OŸ>]šrâĉäää·ß~ÛÚÚºN:Ò­)))½{÷¶µµ}ùå—7oÞ, ¨Ï£]``` Âßß_{‰úwêÔIõ± kkë}ûöÓé àìì,}ªÂÀ"ÜÜÜÔ6Ô+¿k×.¹\Þ°aógÏÖ¯_„ ʯ5ØÙÙ­]»¶N: t?mÖ¬Ù‘#GTÓûí7õÙ¢¢¢4†ªÔ§Ô¬YóÖ­[FGG÷ìÙÓÝÝÝÉÉé×_Uý£áé\@içää$ ïçååmÛ¶M.—›[§ãR$Šæ{¤‘(ihK’Ò~*Í3f̘#F\½z533séÒ¥§N’>_+ä   {{{S„ ·nÝŠ2dˆ"55UÑ«W¯… FDDmÚ´®]»²]KQÆç  Ðé9 „€NG8(ÑøÉfxzÖ@8˜† €p€~\"@‘a8  €‘¸V <íE{™ÞÀsVäwÌÙ/I±`ÕEîèA>øx±Û íæ°+ÒéLó~Qñ¥Uák#aWò^i•U'å§„‚‡™LF `b‡WÀöáÂÿÀDoH$PbSB!o,H8 `Âù ßá€dÀ$Iwê&– ò¤e P˜ƒ¯ã/‡fErF¤Ô[£`CƆ)Ðè(|§Õ†Cà9³¢ ðüày¾1ËïçŒ9àjŠp-ÀKÀ‹Ý9Á{ÓÈEH˜~x Ó—¯ÁÂ^ÀjäDàyb8 ÌåíŠ8pÃè½@8È `š[#o; !ÕtpïH‘ €1€pR€pb`ÃÑáÀ»@7_e,Ý%Ê€g>+B$ÀXJå*á9É|hÂ=®ä÷A/¾»¯pYá ã€p0AÜp@8„@8ÀHÜ;B8€"õj€p ÃÑá`‚¸>[ÒÒ²h€®G8@©·gÏå~ýÖ¼ùæÒmÛ.¨O?~üÖ»ï®tpgcóyË–sÔ³ùΗ¼¼fÛØ|.“ùÔ«7]£À‡“;w^¸zõõ‰ûö]yÿýU½zý¼}ûEÕD™Ìg÷î@oï¹66Ÿ»»µgÏeÕKË—ŸìÑcIŸ>«ví 4f-‚ƒöë·ÆÉi‚¥ågµÒ¹è/ýŸ²³óºuk¨1ÝÅ¥bß¾^?ÿ|’=ÊÜx8Ú4ºžTZtôã¶mçYX|Ö¦ÍÏìz:W¤U«9ª"¼½çÒõJ+šÀdœ:¼té2™<¶^½jª’U³éãîþÕ¸q¯÷êÕ¤V-GÕáÉÀ¢ ¼ô=–|ùåë]»6ЮCvvn… c32–i4 }Áä{Ó37±ªÇ©ï¦Ñõ¤ÙÝ£G#c­oE²³sëÔ™~áÂGÇrqq)-[Î þÞÚÚ²À]>X gg7##&ÈÓ³J\\ªêéÇɋ;y28<<>11]¡x²[„‡?jÚÔMg çÎÝ‘ÉDvv®Æô;wâ\]'?Ý{¬t\–²±±Ríy11IµkWÍWå÷ï5gΡ+NÅÆ¦´iSkÖ¬·_~ÙÝÈE~é…B)ã~g˜Y×û÷ÌÝÈøEë\¹ÜjÈÖ«WŸùúë7V­:3dHõd@×+-&èæÍOÏ*ª§Ýº-ž:µGŸ>Í+T®l_¦ÌçÒtÿð¶mki—°nÝ ›7c 
XàÀèòå˨¦»¸TôõìêZÑÈšxx8„„Ī޾£iS·ß~ûT‘––µs神=—Ý¿ÿCýÌ¥7hà|çNœÎ—ÂÂâ6taG‚Yu=} ,ZߊøøthÛvÞ¸q]Ö¯?çë;ù9w=cF€ðLÜh:ÆßŸ“ôå—;||Ú«¦ß»—P­Z…Æ]ãâR†ݨšþÕWÝ?úhÝ‘#7µß¦!ê×wž>½ç€ëòòª‰cÇv~ÿýU—.ÝSŸhÀˆ¿ürG||êÇÉãÆí4~]”JezzvZZ¶½½¼`‹~æÒûôi¾oßÿ¸wïå~ý¼Ø£ÌMÏ(¦Ôõ´X´¾qv®ÐºuÍ×·iS«Zµòt=Fð"µjå9|ø–¼<Åĉ]›7zpãÆ!ƒmˆŽ~\¿¾óĉ]U7TòI[«I“þ¸q#&;;×Ù¹Btô<õ»t©çNÜØ±;–.ý@šòå—¯ÛÚÊ X››«Ðþ ãÆ½.—[ °¾BÛ?l±páÑg®ESïÝ{$“É*W¶{õÕºŒ.Ø¢Ÿ¹toïšeÊX:t]c5&&é?OϳêzúX´3æ5oï¹~~S´ ¤ë• ÜXºißã%$¤½ùæ² ÞoӿɏhllÊ{ï­\¸°¯Î›Åè &ß› pC"Š»ëå·rYÁˆ}øÙíÃe˜¯Ê•í÷í9wî_ªïp7nç¢E†Oèzæ€Ë xô}ÓûóOæèXnß¾ÏUO·lù˜­º^©îz PZ10 ÐõP’qY`:øUF†£Â ‰kÕá`‚¸w„pèÇå!†£Â ‰kÕá`‚¸w„pèÇå!†£Â ‰kÕá`‚¸w„pèÇå!†£Â #XÑ¥LæC#…çã%3æÊ=„”t¥÷*©ê¦!.ô‚¶QIÃe¼àÌ­ÅÀó åô8@>@,á¥3ìÏ'˜|§#@Àh0Ý·kŀ⎌Õpò¿*£áäÀôG ´{™™L@8ù )¥?š„ù<»ѹ@8@©ÇàȦàèññõÉ(qh©oùE÷@ñZt^êÒwýKšþb'85jayÁÖ´Ÿg› 0dJ¥¡ã¯LÆ/òÍî¸ïРs1|Þ—pY%tüÀðÛ F¦j’ €pò`šÉ€XÂL9Èo2 ps9ð•X×ÏœþnÝ’Sl1Õ§`~Öž}ø…$ Àž}c÷$Â$|ÃÚÆÖ©zíöïútê3²¥Méé‘ð ¢È{Êä7ÜÇÞ7PlNVÆw6¾íе­úô¯Þ¬žø0ÒÈúíCŠéc&únÎÉùÚ[Œ¹‘‘”ÅtóÁJÿ¼©›/!æ‰]áŸ;ïÐý·>ûîØÖ…çþR€Ò¾ß\ë>kÏÃ3žØmk_þìÞuÓgî1r¹9ÙEXá¢-Ms{ýrQñõ–K+üsé$+ÂÌ4È,,<x !ÊUr´°°´+_©iÇÞ£8¼ùÇ”f%·)Žf±çlî>tJÄ­³²ñõ 8U„.ÚÒ4·WýæBˆêõ^±°°¤S ð‚ÃR©ävq˜êøæHÈc£TO#ƒ/ÏÔb¤·|j¯~‡~UŸóú¹ƒß}ÐäóÖ6sµŒ¹{S½¿l;Rú UUOïZ5µWQmí6Î’“•!„¸î{è›÷ëg¦%ÿ:û³/:”Ÿú–gHÀiU!Á§¾ÿ°é˜å.þÍ@m“âcî^÷kÜîÍf{ûþ¹A{†°«ÿ|Û¯ñØNm]¤šxåÔÞÉ=\G·³ß0}—lË,©UçñVU,7;kówŸ|õ†ûå“{ ´ƒv#h”¦*Põ/A¾}ÿaÓ‘Þò}ÿã™Magÿ½»×Ψ¹ßÁ-Û÷´–—iÒ¾×ÅÃÛyšƒí·&¯?7iÝÙS¬ <¾KšøË¬áÃعôlZ—„³ö…ImûÕÆóBˆM3†>¿uvﺷF|_½~ó?WÏÐ×:A£4!Ħo?~~Kz|÷ºßá-ó?ûq×âSIo|ò¿ ÓÝ8Ø@Sä‹F%_í7ªï¸….5ùv“4Ãàéë«V¯ýåò£e+VÑÞ(:Wœdj2ò†cÃ(™GÆ"1º½—lLû²‹Gu³´²®ïÝEõ’¥•u­¦m­mlÛ¿;<-9A5}ÿšoûŽ_T©ª[…*Îm{œ“©z)ôòÙŸ>{íõ¾lõÆ»²Bˆ£[œ¶¶rµêvå*v0Þïà¥R9ko˜âÑs=xYYËÛ¼5TuÝ¿æ»÷ÆþX­F=ûò•»šd æçþÒ¢ëB™…EÃÖÝ4Æ6„ú~^ƾ¼K­F¦®<²e4Qnc›’+„p«ÝDûÔ2gÿ=!DƒV]*:ºŒ\°gÚÖËúÚÁ@#¨›½/LõøÐ†9NZæèVËÚÆ¶e÷þ½|¾=°ö{M‘/•´°´êܬmÙ 7/I3ÄF†XZY×kÙYçF™ýg¸öŠ“ `¶øm”²Áƒ"ÿå…Ÿÿɲ´²ÎÍÎLMzôð^ðŽ_¶íý±tÒýfÇõð ÿëfÞ¹|Ný_ƒ.¸Õn"=®âZSu7âß›ç]=ýç¨Eû\j¨fŽ ¹úõ[5Õÿ=/'[º'@&{r”·–—Q(ò¤Ç7Î<}½ô¸¢£‹¾jGÞLŒjàÝUzÚîía?èìýÆ@™…ŽÄ_³qëÈà'g» öl3âØÖEo|òuýV]4r!DÕêµÕ'êl} Aýc!§ªº¿¤zÚ¢ÛûVL³²–ëkŠ|ÑYÉîC§\?{ä‚=Bˆ£¿.ìøþ}Eª€ÆŠ“ `žÃÂøË  
ä䃢?°²–Ëd2kÛJUÝêµxmÀÔ•¿//½pjÅÄw›¿ÞgþÑXþ‘–¬]T¹ŠŽ•ªºUøï]©È[æ›±ê¢RõgønÁŒÔ¤ÜÜgßóÿÏþMéɉ#½åÒuôq«D_¹xt‡Î™órsT'i÷:Í&oø§—ÏŒßO<·o½1M¤»ô4‚9ÙYÙYª§ªÉmí ¹ù2R“¦½S[_%›´ïù(&<:,(-éÑå“{Z÷\°RBî$<Ÿd òuÏù¦šÔYZYçý{ñ~Ó·[qÌû·Ç»ÔjxËÿ˜öÿ¶ykhó.}WOî#ÝÇ qölzù¬êéÅÃÛ WÀ¹fƒÿüýï9L!t}>0/7Çÿïm3v©ŸÞ¾ûãÖ_æè<„^>[·y'õ)ušw½èÀžeSií ¯ ðlÔ**øŠê飘{u½^-äö 8þGÍÆ­ l¬îƒ'ÿµqî©ßW4½O»rÛ(%gÀ€d€ç D~oH$ÀdòR©¼wó’"ñadnv–R¡HIŒ <¾kÙ—½>š²Rš'=9áAø­ÔÇñg Bd¥§JÓ_í7zÏÏSc#By¹gv¯Y0¼Sƒ!Äã¸è—_}çµƬ™Ò/=å±4ókŒÙ:gDTÈÕœìÌÛ—œ’΋ÒüÒž¾ðî L; ä7.”Þaޱ(¸!fªä|x@ zkhl¬~Æ%€‘ Ô<ÿ³À„H ¦–НGL>(¨ob­ „ƒg/…@PÜNW9ýßµ“ @8L' h+ªN¡Z±àE '<·ã—@8L<"²S JZD(îs6É„Àô#B;—J&Õå†â»fD2á0ýˆ¯~ÁPAéH(ƒÉ„ÀŒò‘ý‚¡‚ÒDÑ]?â( Â`á™ý‚X`æ $sŒ:»F‡øÒ…X`ÎdÂ`¦A»k0Z@D €p€X@D €p@×ù€X`nAç‘d BP’ @8À€t ! @8ˆ à?C$ ˆÐÌ*A804#GK³KÄÂ@2@¾óà€ÉÇÁ¥ä?"päá0Ùd@,ùÐfA€däWû¸öÚ¿ô €R¬C|ä€p³U<á K‘¡Bä¥ç%žH êT}BuÕK7‡Ý¬5«V›°6nŸ»Ý™zçéûÚ3£G5ü­aË€–;U¼5ì–z¼ð˜äQ¾EùºËë¶ o#„ˆÛ¿7¾é¾¦­®·’W“ßùúŽ"øËàôôˆ…ÕTk}»µÛh·ÐI¡R¡wgÜ­·¶^ËÀ–)St¿±ÖS”K)Q?G5ØÔ MX÷qî·GÞN<‘<68=$=zu´ógïÛÞ®Ã]C¾ ‰\é:ÒµuHk÷qî!B„ÆÌÖ2 ¥z5¼Î?¹˜b`u‚ǧ‡¤®¶v;·¸ÔB:C7û«™v9:WÓp5€|8WýÜÇ3¾¾×û^—YË*u¬ôôDo-+ß²¼E çAι‰¹ªé?FÔœUÓÆÅFî$¯öQ5E–BõR²_òÕw®ºŽp­Ú§ª¥½¥âþÊûµÕ¶q³±ª`å:Â5vg¬PŠ–[ !<§y–mVV&—9õwÊÉxRøüÏžvµí¬*Y¹vÓYg}ˆ\Ykn­25ÊX”±¨ú^UÉ "Z\h!„pë^¶qY ¹…ó'ÎÙ²=§{–mRV&—9õsÊËB3›»z5lkÙJ ¬Žtš7\m}í¬N½«i¸ |Z äC»ûídÖ2E–"'!'ãNFØ´0§þNŽï8 !šŸižñSD²²ú¿¤¦Ø7°——ñ(£º1jiÔ£¿5ÜÚ°Œ{ÕÌiAiš_øÏXEŽâÉÍÿžz,l,”yOnÁK<‘X{Imé±¼š\gõU É7ÉÖÓV5›ã;Ž÷æÜ³(óŸ7 ‹–YË„B!ŒœM÷¶Ëô­Žz±úª­¯ÿSµrt®¦áj ù r™tî±q±±q±)S½Ì•žW¤pä›të³[žÿól°©ÁÙjgŸþRä¥äYUÒìkÖU¬m\llœþóöZ©P¶jkacì^nJ®2[)ägÒSE–B‘©°,kù$[T•K£%…žjëmg=žÛjò±F˜.+…È V2eî““Að˜à&»šTí[UfùŸáe»zvÏè¸ÞéC'ÇÞŽ7?¹)ÝÇðdæ:vÉ矾!ŽÛg¸vuì¤+èBéͺöçúôU \óriAiª§YQYÚV(úöÉQ®žÞUÓSm}í¬ÏsXM€p˜%¥H½’*„ȺŸ¥ÈV…Èy”¿?>裠ÚóŸŒêç>ÎMIÏIÈ ,„ÈKË“¦» s Ÿž–¡ÌU>øåÁÕÞW¥o@Ê~íð¦ƒË§.7‡ÝÌMzríÜu¸kèÄдiŠ,EôÚè$ß$!DVä“ù¥y²f !²¢³„Õ'T¿3ùÎãSŠû«î !ü½ü5ê®]izõqÕÃf„¥^NUæ(³¢³Â¦‡¹r˺¯cYš‹¾Ÿeälej”‰Ù£ÈR¤¦„ÍB\hyÁÀê ‚Á,vôÂÝs ñ ‰B&,ËZ–m\Ösºg¹æå¤i÷Wߟnëi[cj°oÂz8x~ã)½µ,*ji”"SQéµJµf×òkâ§:—dÇfû5ô³«g—›Û*¨•âþŠûQK£òÒòz9¼4ï%K;KÕÒ¥UÐx·+.lZ˜ÌJVgYЉ¡5gÖ¬üzeúkT@îüä:Dü¾øðÙá™™vuì<§yVê\Éð²4ÛáY³5ÚÙ(d\Hî£Ü*oW©5»V`çÀš3k}¤ï5ÊÑYmíñSDä¢Hûzöõ××·q³Ñ(G{5 Ô¹÷¦ðk f‘ ÂÌ —ááá€üQ*•ÚÐäq 
ÀSÆÿ³ðØ÷1P|="J´G‡ù5ö;WýÜíÏoŸq®V嬤ߞBd„eȬe;TÔ˜Sgi÷WÞ¯½¨¶›U+×®±;c[ø·B¸u/Û¸¬…ÜÂùçìÙžÓ=Ë6)+“Ëœú9e„e°ÂP¢™Ûnn¹4òj韛'UÝǺG.ŠTï‡8ë›SCZPÚ…æ¤ËçëžWd(„å%r !„=y*³– ;PZYÑ€I²odßìP³¤sIw¾¾“=<Û©¿“¢r×Êá³ÃÓo¥[Wµ~tð‘ç O}sjF+…²mT[ ÞNèÆ50r0xPjTh[¡Ñ¶Fá3ߌq\³1¦Jï*–e- ̩ήŽ]òùdÕÓ¸=qìNá ”¾÷²©×R…i·Ò,ìžöô*½«¤¦D¯ŽvþØÙÀœ–v–©×Rã÷Ç_í}Uá:Ü5tbhÚ4E–"zmt’oRÖý,!Döƒliþì‡ÙÚO¥y6J.+Àó™|r!íZÚÍ¡7•¹Ê:Ëê¨&Ê,eNýœŸ}lWÛÎÀœn_¸]yóŠ}=ûúëë !œ>tÊ}œ{ýýëyiy½^š÷’¯‡¯¯±ŸÔ’~ü´Ÿú7ó7ùv&À$ñQF˜£ñ4>ªgV®÷½î<ÔÙ¡‡{ÉÐ‰Ë 0G§«œ6ÛuÏŒÌ̸“áÐd@2À™çO5*s•á3Ã]GºÒõI€Üs³Îfõµ‰³¢²\|\\>vaë“ ¸çfßÌìk•A2ž‰±E˜;ó¼¾’@8È Æâ²ðogÉ„\bÉ š|X „àP Â`îù@0„€“I<ä3‚hçÁ±  " h èÜ^"0@Fò± ÐÌ‚!b@8@D „Db@8Pˆˆ@J(NW9-mwá %0Nð:€p”¬”@P „…“ 8 „ "™ ÐH ù¥º»LÓO ÒÙ®C|ÚD# h·Í3 BóÌ €pàAAÅT¯A¨Ê€@61”Šè óôO^dt(ò0aø|Ïé 0ý0Áù sdAÂÐëÿ·d7J5Ð/IEND®B`‚galera-4-26.4.25/gcs/doc/GCS_Architecture.odt000644 000164 177776 00000072655 15107057155 021702 0ustar00jenkinsnogroup000000 000000 PK ŸO7^Æ2 ''mimetypeapplication/vnd.oasis.opendocument.textPK ŸO7 content.xmlí][sã6–~ß_Òd§ìlKâE¢$Oì.'©éImw:5íìníK "!‰Ó$ÁðbYyʘ×Ý—ýiù%{À;)‰”ä¶:‘SqKÄpÎw®Aú«×O®CYÚÜ»í©¥G˜grËö–·½þÚŸö^ßýËW|±°Mvcq3v™õMîEð/Þ^x#[o{qàÝpÚáG]ÞDæ ÷™—öº)R߈¹ä•0Ú8­» âbïˆ=Em;#m©/·ŸY{[]·íŒ´j±û‚·íü:ýÔ]ŸFv…‹'Çö>ÞöVQäß ‡ëõz°ÖD4ø°qçÜé0ô”ʵM¥1-9d`OýP¶wOò@WÜ¥jà ²e_ÿ{Ï¢#ï¸ÇÆø3õyø—¼PbÙ·#fa?1«ÃlM,w˜î‘6õ}3¾MÛ¢äõBò£gC~d['n =žwÈ;Û3WÛáÍ(ŽŸ,ÑùVc8fhùž­Éßa$o»ú*„'ª<àKúþ’y,°!¶Éx³vdMÞ^ß>u¸¶Ãð„¨h'„åhÞþƒõšì0o9rúᶈœ\§qÒF¶Ùãd¡Zü.1ûƒšÍ•0éÓ€.ê¯Ò¸€¥³øÒ—½þÆ(VÚ?iÊOj/<ëØ÷H˜Adò .j×>uì%$˜Äad/6éÐÉ×~ƒÁàk@²[P'”R˜Þ!vˆ ÐÖ‹s¯Ÿ'÷’Ft/ŒX¸{  FgýdrǶzÍ$kÛ²Ír …É…·,ðsÙ_×Ì^® (€zÀ*9†lèCåH½íÍXa;ì)!h¯˜Ñ‘Ž¡½¸i‘à˜BöçÜÚ¼¸ÆQΑ8vX&xûò1kò{lú{lö{LU~·’TÛ|’Wóœ³dÇÕCç,ÙAÅ~É^¾ÎP*4> É*@J%ÔVéô——î *䳑î R䳑î zäs‘NÛQ“táÍXÐeâ%ÃaË>Õ…4'+Þ?-ÄOw$ÿSò˜ÌKé†ÝòtuÚ¿@“ªøQvdù1áŽä{b½™ucrGm­¤l±Ìʬ';òÞ³šXm·»Aëù5ì}Û³#˜Íl/ÝCÍÇ‘­]vw¨«>Ô¡=àn?â~­5@è6.p˜wwØ"j ¨Ž dXÃç¾í-œ˜y }îá8v$ÞKÁñŠ ¿ãÏ4g@;| v­ú@çcÆË v>¯­Ê.Šy^ÅÔÎ+äKŠ2Ÿ^|马vÀ³­ÊÂ(àa‹†)ÿòR¯Üö~t¢€.laÆù.1«—+;{ð¼)”âOÒÂ!§Ø°5VþõTáÚ–å°”çµB¾X„ïž03©š"– í›­í%>/¶Öhkµ¾…‡ÙÌ02û8™1´R‚vÛ+J¨9 úØî3«Ÿ šñxß±];ú㪽vÚú µc}5›½ˆÚÿx*«N?HeÌ`ÆéùEe;UV{n²…ÊðUGIÒÎÖeJ1»¹4øq-D9‰:©“0ÏÂWH|ù壬FRC™M³1Nf ~ãS 
ßå‚ê⨊X¯ZdE¸¥Õ¼¥)/´ÉÛ.–Øh‰µg\»–”÷’Þ%Æ™¯ ð§TFJÚæt±ô‡›+–*OS ý¢¼íÛ!µgxÏJ{êh:ºho»öN³›¥ë¦i—ÄýitvðvVÙãÚ¬­S¿Ê[`lËfÙ6ë›ä«|G`nE#s•î$;ÔüˆŠXÛƒ Ïmu~±¶fk;dîR'U'b£ÁïÈ^å‹)}ùªä«|-㨵Ë 4<ÇaÙÛ];lWÅ÷ïó8HÍíëoq4îh—¬Žú?xóÓè¢N.ð¼pšÍLE™Âû³Zö–¦5¾®Ê:4µŒRÔ¾˜Ås˜ÅÁ¦ŸfÏãN þáÖ×´' ø~¶ì Uø‹ï¬ ÜBï7ã0óNÙèûΦo±ܺïr‹˜‘glØÏR®äÍñõ‹òÀe‡¾C7}GâEÝÉÁ ô\´ÛïâK@œæ: ö¾åþ¸Qàãу|+ÿøQ~P¨µUýˆÔééûÊðjïîÍ7È}`®ìˆ™Q0ò ùÑfkr…Pˆ  ]'Ó­’ü†ði]a§Œ#Û)þ m|V7ç_À–-|:¾ -‰ùpr÷ KýòT#P’µÔh '3;ù[ypFjzõ Oêê4» —ÔŠ8¸K*%m }êÕ ƽ»oÜX hâàÿhs€¦:‚Ž©¾|øT¸¸ÕaÌÁßÀùúáŠú¬öÓfìíØ+%ìÀ^Õ±×u½ˆýh0™Nµ2öº>+`?L$aÞ ÿÕ_2î²(؈h_sÜ&! ÑTCI~ÙÄ¢Îë…B^/&ð¿V¦HÄ>ÐA ‘.̹e/l@¿ñÅ_ISƈ/þÕ;‚Ë©ß9‰JäoJžœ1¼‚“§ÿÊ«ÿM¾O•>+­D|“¸.²DÑ2vèmï •ô†»z¨µÊžZµ‡`­¿w&½¹ ¸»ã¨ÚñõB'_"xCÙîÞãzo•üÛëÅhϬFS¿Ö³Nšzƒ¬F>+T¥¤ð¹ŠÍ/Re&õ–¬ÿÔ‡ÚÅvcWDõÆvú$Û%› 4›=clò1¤íÓ€Só«´¡:Ž&³OMÔÁ´ÉŸ5šL‡žUc‰Å×Þ΀2Ò·*Þî è2i‹žÐÊïòŠäPÏBR÷@³/lÔÍ>¿ßhöÍ´%Ðì¯gLûûÕ" pø%˜6¢fÒ¨8Þ3k-Â$ÜîCgÚ ‡/1Õµáv¶ÛiÛ¸¦/Ô3ˆk™¬âZ›JëÐúµ1èémcž10fyšÆ õÁHîÆm“ÐeaH—ì™Eܲ[ l·F½ÏÆz]|u<ÒËe\°[)þl‡ôû"Íß™ÉìGÛ[’„R’™¸ÞE«€QkW Ú…"sÛ»íº¨Í8ªmó‚6P&MŽ2S+yA7Š ±<–Ñrar×½d¼eÈ7aÄÜ*| D†@­y¹Ö6*êtV ú–ÑF9–h4QfOGú‰C¹v\JšU9 )©D£dÝ%GךFi6ޫ牮UmWL¦J9*ʬ!J¨G®žï ÷{žsÝœ³ÇÛR+wÇ4Co¨»'…V¹ðO*{ª£‰¶¡cKòôÖãßæ>Ùè6'¡ÖKoHÁ³Üÿ 5µ1™”rиºc­Ì´Ù‘‚Ðm—Üì\T6»pÛHTFECTgÓJQ92 4š<¦ïˆD'»¯ÚÕšš«¶Ö4SšîT¬IŒ’5õÉ ÉóÖ;8 ¥¹PézK†¢Ô²nÉF hÚ©“nón¦º³7ɺc£´€‚¡0¸ðGÛ¯c…¯4°ì_Xå‚5ÁZ9‰VŒÄ¾\^…¨Ù(\KaÓ¸\žYŠkq|ÿ,ÌëRX £ 3 ’Ü|úŸ YäA¹Æðö2’'Vxp.” lp.æEÎæ%)î†×UGm€Ü‘ûÂÓ!ÁG”%¡íúBá˜aY±õ¤Jô©É\7B›ðY<‡är0A¾ û9¶ÐèŠÇ!ûȘS7s¶¢ÎI†ò7¾fÀj(6+BlmÅ#‰ß®¯hˆðÍ\âY˜`¾)r"0EbÀFä#S0“À¹þí×ÿ™ã{*E¿ýú¿8À©¹J• âA‹hxCÀäùZ½Å½?SŸ‡‰H¸Â±©·!â@;ÿ„„d{r—ÞßÞ±'“ùéá 2 äï f¤Ü¶H{‘ U§µ!Àž]æò`#&—È\Ÿ šwÕ‘‰’‚¹ˆÌQªä$é ñx" ¿°-ô ¬Î[‚Õ5“–—µ§âC6¥éo®®ÅLâtLrã}@š(¤ëƒ)¤T§´6˜S°| ¦©8"!—††µ! 
Ô8<2ˆ$ø°': „&ËFs@öCæR0ˆ¹øÆl0ûû·`ÁÔI\L7ÙÃ}EÞ|SmM@óõ+˜4ФÃ{ 5‡/a|úa„¼`¥àC€^pbc¡Ç¤±â+@j‚‡þ¥Kzf¬Ÿ K€Å»ºš jË] 'ö8z_ƒ9…QÁ¾Á‚脨zIH¡‡h4‡©W íb9¿°'§¿ÿö}Ë<®uÉãªØ#tù#K‘ç"¤p˜ßŶá€Ü;NF"mJÀeR Ö˘cœ3+>_'“vÃãHD2@ž:Ñf@®~ô-4Íß~ý'Æ·ò˜Õîß•†ª…Él,ˆvŠBRL’Ò^Ä 3‰ É0–cU$lÉQ3†By0ئÎÁ:\(N Ç àX pƒ$(tSŒrâmgø8™•xCjN"Ç-â'FQìJ…´ë·WÈóu’36àz%‚1×’Y÷瘱‹YÈŒ$úRq`«a9e²pjÀ‹‹™ ”%õPä+uüÌÂrÎOLøöDîKA./ªP|¼NÆæ1G[´X~qaE((ÆáÔZÄ´Ö2!ê"Ôÿ‰“#®¡-—) ä*uºB[ Ãi gœþž ô"³æ¡iý+™yÅ}T¾$¢hïØ-²#HÈÐM vEÓÒXŒ»@ŸB=f5µBš|ɇËÃæ Ùh'—Ÿ ÔÎ^Õ$»ëì„A¯áJ«NH×`ŸÏ¢yÇgÍqÕ%æê<:e‰y Uz$ÃsŽû׉„»@æÎœºäÅ0r*#F“¬‡À&Q™Œ’;¥mUž€>¬D™ÞTlá@5+÷7 ×+eÆ¡¦ˆ‰É bÑ_ËÉ$K€qX‡ÚÑ&‹Ž2Ý% ¡!„2lÉ4ÀlŒ¥L¶È B´^q˜ 3¹´š‚.佌U\ ¶ÚÆ~²S%*~ɨð‚d¸0]Še BÆÍÜc¨¬cÐl¼ ¢eÎŽŒ'×í‚å0 6çßäk_Òoi-&^šÿÞý?PKP݃«õ®PK ŸO7 layout-cachecd`d(àc``àd``Ñ2PKXsØîPK ŸO7 styles.xmlÍZ[¯Û¸~ï¯\lßdK¾ÛM²Èv±ÝI𤝠Z¢m6)”/ùõÞ$ʦ|t.-NØÄœÃáÌpø‘Ô»ŸÏe1„Ñ÷£tœŒ"L3–º?úöõ·x5úùßޱݎdx“³¬.1•±—‹:S±1Â÷£šÓ C‚ˆ E%™mX…©ë´ñÑ=”iÑʆv×`¿·Äg9´³Âvú¢íð‘5ØïstÚYaÁ§~÷Úù,ŠxÇ⌕’äÊŠsAè÷÷£ƒ”Õf29NãÓlÌø~’®×뉖6g ®ªy¡Qy6ÁVƒ‰I:N'[b‰†Ú§°¾I´.·˜v ’è&ªâ¸œÇ}k²âƒsCƒ»áåÃÃ;Ëý¾%’‡ž˜¬&ŸA¨ÿ÷ùS› ¼:–Âv\•qR ž¦Aûýc©ªƒY ÚÜi’Ì'æ·‡>Ý…Ÿ8‘˜{ðì.ÞcŠ9ÒÁ­¾g›ö`¡ÕíÚû‡'"Ä+zeúŠny±mÿÆ{Âãt¹\<2ë&«4 šG â„.â±’éÕC{Œ½ª‚ó¶JÜ9ÿ¶£§´»?¡H©þO©=ä…rØ"¢ô9q¿J™'‡ÝYd8«¥Þ |TU(õEê*"éâ§6J'Löˆö–ytOhz9q¸oƒžÞ @÷N¦÷ƒ4}{Aš ÒÜ’ÓfNgN pzbì®W™Œ•:•ÏŽmX{#ï×ÿÌèº÷œ°Þ [¡T¬r×¥'nùßP¥èÊ+n~`*GOÜ ¼‚ß»è·Iór¦m¢¡gZ¢óÙ=¬%ýDeôb;(wÃ!Æaï¤ïàÿjÞVŸã«ôè„øíúœ®Ï¯˜SDë»›S¯”¯·²~©‹ëÕmÞÈDx[0°è ÓÐoŸX=Êuü¹nž¯ïðžgS3n!¦yŸ$l S¯<ÒZ¦—›·E‚õÒ{¹ÇjiÝ ˜‘•šqý=¤*‡ÞW Z[ûñ‚ºÿÀÏœÀ‘÷=„] ¼ÊW•é(€¹:ºiɉäê w5ž.f–^hÁÁ^=¤éxÙyt¹š£|(c 40ûÀ_1'"ün9_õÑ»…ë–H>¬æ=DÒ‰¥ë}³´~ØsÒ~Š‘%ê¿Â~n‘h”Œ§«ÖÇëíÅÃ¥³ug¬õŸln´ÀòWßCvHªÒAè>fùb{Vº~í2ë4.ѹ5Ll¿è±+ç`“3Š}>x\ÜU&°òCwРÙz¡á0åÿ©…4ËÂ,Ó<Ùåë´½p¼úÅ`e¾›×#õÀ£LüÉz·ŠÚ%z»&­ D¢ÑÑŒf•¦»o4¾ÍíZö*ÕöIøcùÿPK)Ãø© l/PK ŸO7meta.xml”A›0…ïýíÕ1†„$°j•zÚC“CO•cOˆ»`#cšäß0°u¥ýæ›7ÏÆ&y½•…÷L-µJ}² |×Bª<õ‡ïhç¿f_}>KThÞ” ,*Á2¯mU5u¥ÔoŒ¢šÕ²¦Š•PSË©®@-tNÓ~Sn…T况¶¢_¯×Õ5Zi“c²ßïq_QÁ'®jLÑS‚c( ›Pc²"xd»„φêØy$­õ4¨Ã]è~\kìÖ¾7læáø²ñ¬:Ó,é­sP`˜Õ&{kÝÞfn«ðå¨äÍ{ÔWFÿnq¼ J²~ùÖÈB =‰Ö ^ºRI+Y¸^ýZÀ îÞ¯Æð ¨w=´-1×ܯÚüH0 Y»Á‘øF4ÜQ²º¹Dpúù¸Y±Gç-""›CÒMLÉ®]?¢2RYètÿl 3bÖ²ˆ¾=1mgDdÞõ‘»`*oX(tüÙ‡˜$ç 
¢=)•#~çÔÙz;-t75$à,¯WºË–ú?ÔY{ÄÇÏ`ásXô¶ž°éÅÖ¶ýxµ•ÜÁ– @\7ʦ~{ñÝý(ÛXŠúÔ]Å¥ZÍÈpÒ Ë «.S •«6b7›xPù¥mà¶ÝÇPŠb²ï’ã‡'„ÿ÷óÉþPKAøÄ#îºPK ŸO7Thumbnails/thumbnail.pngŒ»eT[ÍöKK ´h¡-î^¬¸»»»µ¸{pV¤8/RÜ]ƒ„B‹»/nAA‚<ÿó®uÞçKvVÖž™k2÷Üó»föŽÑP“Ç~Kò[QAF ymAAy‚†ü§å_AA‘žU”‘Ôñý Í}“©ó·ãf-òë†\¹ŒÄY ™×š[8Ó¯Úìð8Tâ”ñ›ˆ/³rvöŽRWÕS«è:†·SÚ^Äc¢™HH|üð>)(5ˆèK$DÀüÑáì0ñó4¯$ ´Sðyÿ°¬s²B»L-÷¹±°î_õmߟF†ý—¬6!•L´- ú§´Âž`TÒµu…ô™”VÔ`D1N²'†"9Í>^ŠÌõÿïé‹×[3nFÍŸ´2e/û)Ÿ4p(ÅX8˜˜’S†-ElóƒZwYÌc8ÁÏgÝž“¢ú‘XÄøû¶3úæA-ÍN®¾³¯ý‘Ã1ŒyL5ÆmµÉ×E‰b042¢öÉDç•V?0b¾NåfÕ3±ÙuCìó F70žÍë±`h áÞöSv帀˜SÊã\»áémߎPÇÀ€{Y¶],u]zÖØÉ4ZA7k°x×@él^GÇÅÚ4yÊ SÐ1»!8¸ûẠœÆ¬]:ŠŠnqˆ‰‰Ù]–篔ÎaÎŽ¼Vrf˜xÄÔ;#®aËŽuo^ z·Ÿƒ;à Æ»ì¸17#,ud~—ãüÎË®Á—90¨+Ðøû÷ï]Çu`³§ÛAq%3³<¶A¿ý\Æbðå„p‰~Ù»™ ³€Tra í]ˆm>Üf<›^Ôÿ*º$>¯m ¸þQ ¯ê ù‚ÁäÚuésÒ082̘ϊ,õ£D9cdœÃiøú¼w´­Ζ%Ð*¿ |<67É÷ÙŽçOHçßà¾ÿæÜ…›¸pÖímY\>†.Öå³Mêê›Ò³è×6?Ãɬ­­‘C1ù“ŸÎ›- _+GOIMèN¹Çe yGÃNa7)tzCOâCëgKU/Z>øÁ†K\7z`ˆãºÑt6cÖwÓÿ5¾{9)ÞæKZõe»öÍýC°û¥6é ÃÒú—}ßø S¿¤¡=+.ãø¯´Zí¦¥(üv’ ƒ”@“àópʡӵNçXuÜ>?àÓe>f÷ßÉàë&lìGˆ%eàùï¥`Œç«Yõ•H_¤†|{ûŠ«Ó5a=ôã…šTdYy•㓬ôy1¬˜1u wqºf‘ܰ,¡)ÝYÏ}íT¸ÈóÃ%l—Ùv]ïZ<€~ä0s¢,ÅNÆ×Ú¾‡¶Þَ㮞 ùr M}G+·t†C.¤p¹7îŒ 4ó• Föt%ë…àÈjvÍ}œËfŒ(§² ‡yŠÂþWvNŸºÇ…fPz¾Œòù‚²µ e§âúí¦dèAZZ,5ÿáÞÌqÅ!ïà (øtçýL ."™h1;u,á@Á4R°ù¯ª¼üÜ·}Ø)äúŸ×Fuðù~£øüäCYªŸr¸/œÞþã¾c?/67½áíºò¯µ¤Žb9z<~¿&ôµ³—vZ‘%ü'¹Ú“wèu½Îú¸œï1 {<nìI:Úƒ qMßüBÙAg0Ç¥)æF‡Îz\l¶¯°wž4í §Š~ÿ:DÉ)Ô"ºØ\q$÷ùYmÖ½ü9כƄIy¯Ÿ“a0³PtŠzKLéb¿sé ‘ýǘŒçÝ sŠÑ{äçõA®ÔsS0ÑKùfúVbyW2 9†D߈Ðå—Ž$8ËLê#†ž‹|ñÉéc èÆ£»éšàã”çDàöAu -Ïa Þ!™ÂèÿJ\N "ŽšÃ¤i¦d¾’¡ú¿ó—©òy%¥|ù÷ïoï¿?F¨Õ?³#§êIάïfEá6ŸÓ’†¥yW Š«ˆôœŒÂ‘‘ÅÄOÁë4Þâ=ÕíFëѦI±¦'â¤þÒ.’}烰טnË}Ó¥ûÁ®à}haàSàq{Døûs²ÁžxR~£¶—d½C©ü±ÅÎÆ€yÐø²ý=fgóUú‹j6HñlÓž¶†-iðß!^Iï_n9½:¤2Êás®¾g[šÖ¿X¨pœÌ½&Ýx—€ŠG¹C˜$® ‹°²Ý1û)ÆnÏ®vx:÷ öÒ½Dˆ¨+ª ŸÓy™gEkÆbkë-ÝŸ¿±x* ü–X÷ñü… 9ôn“«êÏâö¹ÎþWíu³Š£ÀÀÆ¢SîFyxóïR@8çCôÌ}‡üyßO}º,M Ñî"o' êD¨+ÝtRŒ½xê|RGIãŵ—Ælž´U§›A7‡À{g'90µ2}B¤ÐÖ"Ié›BþY°x†‹^B©À'…þªåx8t@¦ HòÐB×5b ]ÉÁ±’"/†ýVb®¨ø)«ÊsNôa¸b€a;éô>[é.•KËöå/Ên?ÍÍ'c›¥lù.KOlI@òɲ`¤ÐÄG ÛîR59»5ým9teyÌ,Ò—5`/üœ@íù•Òˆ9UˆjF¦¢±õƸ!‰6ÈÙ¿1'ù¥?é×ÐQz6£p´WÞ,QÆEQQ,>^ÇTÜí8ä‚E<£aÅ[!ѧ©x´µÅe?[:âºuþ¯R¯Ì°iÎXïÌŒÞoü²æöMÇ?mfæa½= ÄËeEÍ›Ò~¶z¹ÕíF@÷c]Û8?yú»ÇåK›xOÇ¿GÕo?áQ”.é¹Â¸ 
ÏO¯÷À^SF|.«´„f:zµ«ì¹ÂmÉ5ƘÖç›––2êô\î%ò¼J[ƒ²œ¸¬Ä\—x¿î36Þlât‚ë}ËîÀR ÇB:ß•c‹­cõ´Ì4³µ¤<MAÍi}}øökBê8äŸ2RR<˜™ìX¬­Ó컟J¥&R³õüú#×®÷œ!ZL²²QŦîû‚“e€Ήƒ‹o™€F6,gd- ñÜô„t«š!’­g+LP#Ÿ”\Ùè“j§ÞH½ØöN·nSqõVR©ˆ¡ p¹õ¤‚†oQÜBÆÕÚòËÊÒ` †è9PœêÙƒÿ×¼ÿÉÒ ÄáàKÒ!;¶L‚ÉvwTÖa€í¨WÄLþÞ&¡e´Îfîð¬OÆžáBî?úF´ F¶~Ãz¿Vt²Ä™ ,™ØW|UK¹êÖ/`Œn"´1„}DÛÒzäÍ¡à{rZ<®$ –šöYÎÿÐ5÷ØoJ|t™f æOü“X–ÁnG®ó(ÖÈö_‰„·Züï%½ÏšÌ)U‘yCž)KÚÖ½1{á0¯^=àkŸ2¼£¶÷¬ÉzÚzŒ:­)•|;³üï³äN—óÀµ€$¶º!É…T#ãcü±!F)Ó7•‡M¿ÒÐ Õ ÒL 9ê¼ÚD Só­q)Yµ$#Í•õ6í‰ÓN+HÀÙð-³¶®Ã;ZÒŸ)$a?i¥›Q)/sé¬KJ`3 À¹ØO>ÅEôŽžAëþ~$a Ü(›7FäÝUi5ûÔ¬uhï}ü0|d=:#uZc|œG8]²õz8&*O´©ÿ™@ò™ ½? [eu_ª:ãØ‹b(âS»ÓæÂÖöŒXô€CÛÊd_vÝniÏ4Z÷`QW¾ï•&à­è~!y¸PÓy²ÚaЏ¡dZisM‹'5̸0°_iÍu‚4j"AÎB:óyeuµìÒXQQ±á1è|S²éù•ýHUi4[-¢XCá$^àû iýŽ%üèµ6ù“{ÿqï –Ü)r5ANºÜ±¬Ël“À‹ «}>lÄ×'´ÈÕ<½ÙaÞè#›ÿdpF:Eãáb¹%m’Vð{CÓçHùª“½¡BC3%˜;×+À÷HvUm¶ß0K ä{*}ýì¶."8GâAÉ3c%5˜€Íp3WaÞ²ñ*«x>—k·ÒÀ©Ç~¶Ç%H”ªÝeÖ %¬Î°IK.Ql¶””ÿ”¬óqF¹lÁ°”‡WJ‹ÿE3<øPØ(cÞn›ø/þܱ¾3ñ±fÅCqM&di_5×ýÇcV²j~ƒ£Ø“A0·cúš¹ñ¸éÆ‚hƸ?uEžPÁ+i5QÝvsvrÌ·'À›oZBÃýs-¶êöó{Ñ6ëæLPa€’/®¥½¢âŒ—jhÒqÄ é¨…¥2§V|{²Æm#LÒ:Ï®âw× ƒ;ö³'gÏ~{Äð‚¼ýðí7¬\Å·>Дè²a . 
§TÂá4í?÷C9…#ÖÐü©$Þ¯þ¶ÀÂæ3$P“­‚Íöö•¹¹©º.ÅÂÖ„}F€b°èfGs^Z›Õ?1°Ûç‡D'“N=›U÷¨À.߆jÑц•VÅ<Ák ÷†.°ñÎî·bMþrTÂW.‹E´¤ù–°ó|‰G$~n ª‹p¦_³ð¯YÅà&ðî?ÿ[³Ç>=ÇCëªË“öزó´TÍÃðàaøËê§¾Z´ö=Þ¾I’È’°K".ÁÐt}•lLË…xj¤³¯ÑuûÒ¯¡—Îv#JêBgÛ”›ˆæzO|ñìF1[z«³ñHO½7K™.j¯SœÖ‡!y<:Y3û,ìØzç±»óþÇ™JݼÀ‡‹ç}_ùX_^¨ãP°¶ü¢Æìkåuôá]Œs cQ —ÒÚÿ\ ~Âb!˜ø§Ú¤ÏÀjû÷Îë­$­Ñ½2ŸÒå^¼øŠh†bn¿[F¯ãFOÚãíMdzqsôÐÁô#iS´½äˆB®£kùÇÉ,Ë n“1 ”÷ЧïÍL¿¡øOý•öŒëè¦R»óB‹ê¦ÁfxÍ×<¦|Ȥ(£­z«ß4€ ÕP´Q½ Ÿ™šwzƒûëZ’Î×2êñï²±V™œ°RêÙ•éTÖQ8†¬¯X8XRõóµT²éò]W™Ð‘€›‚Š^½æ½µ›1³Ÿ­Í}Øn±Ï´,×1°AlмÓ+9(àÚÀ˜çÙx±®í>c,—Å:RçÒÏŽ¤ÔÎ%õð€JÛ÷.ÿ„ãŽWhCA;¤zµÍuÅʱ±%}"ïY°²&A;ølfb?=çgJOÖ|,m¸×·þ•9?2ýj"®0ib´.ö6>vÊ NÂúL›žùüìÄÜ1š”šÛœôNú\ }äÇÐŽ¤ãwG±D¹hma×jk;è£àFj™zâîŽ|{Ÿ gðÅ¿gø¢õ¿½J”Ôâ3¯1N8h@Å¡Džœ¸ÚfAJN"&›Åz¾$îÆÌÇ´åL¥éÍVçdë­æ»'–è÷%øXy² UZO÷/õjœ®x@"Çœ5Ù òò¡G pð#×iŸô»–ÚÑÝå~Àíöxtp(ñ+øgZ#ýïÇ¢ŸBˆ§ƒÇ5mJufÝR…üœzÕÔwww\®ë"ìŸjöƲh È´ž‘ɘãx|Ów,““G'%CÿCН#Ú¡!3#¶;Ê–ÅÁ^Ìg#î;°¯éÌ|÷H͵„4„sb€h¡­î›õÔC›l”côþ ìú¦Óv’ÂïbkÝòâZ<Õƒúîh¾ªÄçl6(þ¤ÙsÝRqmEú)È=ÒœYC­gÀ@ 0K.ú.wH uçÕŒ2i÷…’`…íXæW–ïÓ÷½•ï‚vܼZÈšØ,Ø—‘%–”lQìBÛœ 4HòMA¦qåý‰\6ja¯ª*½šÆ…§´t^,·T9·ÜËšãOK!-àõW&&¦úä$ªÒ ï+¨€×îܶ‡eISYáUµ üs¤‰Î¿[¸ßïQ*kxiþ°E¥{i´‹Mʧ°ÑD[ú÷Zàô¡YKm; þxÙð èßhFY‘î]x¹éjKâ`¸ƒƒ‘£‘Í%ÞRäžPz©!Aí׌„t!–õ.+eÁmj*]xf²·Cw»<¡5Ÿ¾ 1 ø”«»ú¢—W¥ç®§”=rì§ ·5 sÄÙ¹¢)”îÇ4½7ŸÅBáO ï«Õ wÂôÅ¥Ÿ!v„=3ÂTyø˜¢EÙ[Cqj×*ãüm® ÉÍøx`AcÞœcv)A֎¥ ˜&õˆšÂ#lNžTê.™èÿlÜ`*Ë’µr—Ãé L7Hy"yK8áAÇÔÚIžÅë]~uØ›}Õ¯1F‰m±¢Ù>ïô!M©ñž þpÁÃb ¾Ó׿P„O‰W;âÖŒ˜ÂÑ)Yœ—›«gýŽ›N¿Ó@,€xqUr<üШ4Ç›Åþÿ f=4:ÖÔ H×qÒÓt@È”Íæ‘0·Ú‹ÌŠÃ‡e”ŒH,ÿÛ†póq¥€E?˜â8µnbdOó›liÍ+@Àãs2pA°°J78.z ÞŸ( ¶ùJffÊdGÒFK‚îqæÌç*¾&H̽Ø$Í‹(¾ÇŽª@¼a0‰ÔH*¾ „W­¶h”{šæ‡gqÏIc²ÇÝ.í·ß½,(t¼xwo/Ž\X¨َa{»þþÐìз}ôt ÝyˆÍÅãÓù„°«:uzæ—¿vG3Œ´šžÞ¦ºÕuØDROR@Ú\][àú"žáÇuÌf¢çSM¸ùô=“®ѢÞS.ÌuQrÉÙ9†šîK)„›örl„NÚè<óñN³§‚aÍž…éGGU¶Vù'÷_Þ¿¨¿¬pÇʸê1ÊñdlT?éH¦B¬Ô ·á¬ÚG¶T Ë2‰“¼i²: Ò´ºä2"Ñ݉ÐXSÓ•òúP{¾¦¨h±nƒM:ꚇͪ„Õ¤}½ÕDëãîŽ-ާ¢ÔñJ/…ž„$DltŸ¡Ó•9ÕW·›á~§„ùú$Æa=ð[!þbXªq¸Tž‡’y–°O Ò­-¼v¿¢n ¾8zy'ÙMìáÔBùõ`G·³¿Oœk@w¼åó÷c{ß^¯äËÂÿÙ°¨~d[þrScÚ©‰{F_¥çÜÆ¦“HAUµEùp&÷)L!ƒõÙûéNÛb¦H=?6- ÍŽdÅÕ_¤_Û‚Tð2íTRfâUaÖz®L½“~f,Џ˜”Lï$u´’퇘µfI,§ bW/ Â]§ Ùª×22ž•ëÅŽbw;fÓ§FíbŠZâ0WÜ òæãO’ÔÔ´‹ò¦?ýö$V”f-sýö.8kÈtò>MbÓѬ!k÷9Yºî´Ý›#G{-#RX5K¹«=ù 7`štà\¸7.q6$HsMÀqk 
"¯ÐÔ‡D"‰]»s‘ Ë<•ÉWï©Þ”9M!›‚éJ·Æ‡EPD*õ±¢i¾¾£l×6_ƒß]Mqc¡‰?…8ëçñSgÒâyÝèÕªt¥Ñ‡-CCjQô9tNÈÅæÔoAƒæ¶e¾¿[Éáè&FåØ.Z×k“»w#IÙ ÄïèsêaáÀÑ?,`æ¦ü`…ëÀM%¬F;»ÊÇ­äVw]ú»«U®¸ë¾r‘qbêA]½ Ïò{8—®wçÐŒ’f_ðŒ´etÔÔ°'—µÚ¢j3}.¹%mÁéJË8~Ka—ÊÇ.XñO­ù;œ“Ô™ÔëŸe*]«xÁ`Õ¶ë¢;­FðT:úÉR³ÑNš‚ädp©w7g$‚:EÝ 7!7eí0µÖ1óWxëã’¬¹”›µ¼fKN&`o:\ñ>`]á¯M©qLWÞ? šþ½önïb©¡ÝØy‘L]cÛz¶Zßû›0³°$ö‘{¸=›Ù¯4ÄªŽ¢“-uÎ^£9¦w³8uøã’Qß<ŸrÌ”Ù*Ÿ#ð„“µî’1kàíóؿ”¬A@Î'†µ{ý;wÍ#|Û¶‰ƒÁ‰ÝVÙÎÎŒ*ÝóO‰8§Éù`;X©|øMêú»‹-“Äü%G!6ÓöžûSsåÒ\7z®Óluæsöþóìn‰¬q±iâo)Z…]‰ö]6Ù©Õª“›FkóD¬èa7KÜùˆf ïM…”ÜÀ£†•E—…xßí\Å‹â‰?þcþÇòÑ¥ýš#ü2{½¹š©vÞª]Aö;WÕ|wTÚIZÏPWज़×óIÓ¤Õj»»æþýšÇºß>$©Ás¢%ôuŒ¹ŠÐu"™ Vß: ÒÇÅʤ¦ýëZqE̳±³·ÂW*´I3ng_©r037¯ÊJ5ZÅ`ï+mpOФúÎjØ%þã_iÄffäM{ý5Ú²hä!m³ETgÜ61À%vñ‡zS¬˜êôùì3F@o5ñËUøóV,Gú7LÂe‰›?).‹µ¹q¢FqëH ‡ôu:Ëü˜SÚ,QÎ0z™Šþ{!&RÈnª€¡§A½[Üs1@ê+ŒLݸ¸ÍÛ‰âfìí›/I3²§‰Ó –ò2h÷1”Õ.D¥j¹àùxÀÈækðñ ß{hÛÊ®ØmïØbëOe…%ˆn€wÑ„À‡Ã¸>SZc8æ3š.ov‹Œ,Xc_}¤&‰\þ“)mžuH“oÅ'‰@ý‹'õ#EÈØ¬+ÄéS%ëHÖo7W¦úM!,„rǤ#$!Y¥ü“ã(¤œ®ïµu¢ˆI½DñiPÏÐÐL'…¤lýÈ­dXrc 7ŒÎÚ°yÕÀÜ?¶UÊ.«jÅ_Áá$¡®›oÉ]ˆÚŒÇ*8éà®WkÌ™›ñÕ¼âö£øÒž"°­æ!—º{G³7’ˆGMò¹¶Ï‘þzP ðêN·ZqhšT-Â…ÔŸ* ¡ÐßÓÿ ´ï»œäH kåê«u— ´é›9…h;Yr4u ÍbÜ”½ËÍ®nþ`ÿ‘cI»ô§ÉÜœ„^ƒºüõ+‹:¾ó sÔÚ-°u.4¶š‘ûüÜ­¥&Ö¡îŸZc|£x¶žký; À§çio/Û7 é|ŠÇ^¡Vü´Td<2F¯~äú?{>|ÿÕÅYÕTc‚ì øªDÛ)ÎÚåÇ€aÏ¿HwoʆJÞS®áÍÿ‚#{oF9Êö ÄøÔƒ]*aôŒà84@-kH‚ËÝËíÓêÄ6‘]哿Ø=ª¢:tž*j÷>g‹¤6*&v„"ªÑxÁÅRVaÜÚ ´t€4Ú]W\þÇDHn‚lò¦»D4È!ªGX9óKå‰XŠioÌS£½ÂBèõÉ1™ŽSå[š‰—ê#Š¢|žÙg*rn^мž7»ª*Çt,5¿ý>t[ ä‘/®ÖÙi¾¬z"$Œi>Ô¸$j¥\b™o[žŒ‰Ëu:UûÙæ©Ðú-äÑ”pŽ?‘W×€—"Dq Ýç(\Š$ç’ÊŠeqpv å9Ó8fļ‡â>€†’fe…¼«”P½s\‚æi x~ÛœÉ8¯iŽÅ¹på8’ßíg>'ì´x„*dPÐ5 ßÄ Q¥dph5>cüçµßï¦ãzRJ~ì”ïo& ‡),ž˜$ó­)@³9ˆ®ˆé#¿ ªñÑ­ÉÒ`ìá'÷‘Ó8¦ë½K„ëQm¯CiÒ>èÖjÍ[<ƒÑe'òŒZ%~)tNèÒ™<,'wÁVtvå×zbí×puƒ| ÐEÿFîîýæj«(T±Úf¤)SÁ‡´Ðß`¬%Ý•Š·–”~é:áî¹’Ñô|Îýk Sû<ßÚŸ¦ûìW‡åä²zí#òi¦º¢Ïî(‘¿ÎßV0E—#g¯\ðOVf:<|‡–÷‰ë ¹’'(²xû{ަ”­Éç'ãKzjÚþf.wì$‘Š©y–ÓG~" ´×9.–ÍXHÙ¨ ß8(k_­3~®Ýc9²  k´BCšO4ƒúwsU°~ªj5m9h–§ý:jJN]±ÝKÏÃÄ!¢ÓÒú%ÖFëúî…àí´·½.ó†.+}•·ò7ˆùÕ“½ð‰ô“¶Àí4NÌoj?ý-ŸðfŠ)Cš»qh•OÝIýãZ—©rhòô³€âÙˆàk¢º‡zSŽ´ç{ì åxïË Væ ¿ñ’Ù?O|ÀbÍíE5ääA SqŒŠ-Ù-xɼaÈúA6jgó{îÌEuVO›Ìwì –ñ§=(¬d¯)P½Á7M×̤§¿u‰¾êfè_ãÛ‹óa+ UŽL§`ýz#ÿ`JÊ o=«)š€Ã|Ǥ3Õº˜‚·jö€@ •e¥K÷wG'GŠäèÁØA'Ú„Î@ÖÌÀ°øøéGD²n޼~ô_ñ 
˜ÛXhÅqš4ôúa¼x3E’æ…û¡Hüšíõ ²Q°úà§-ÏžØBåO¶’NàRä!¨´Ïä,Zæ‚ÆsøºnpŒ{ðZäµ?øÞõS>Q­uúd¨‹?þÐüÑuÞ‡Ùøµ­Š —RŒµÆ¨%E4àÚ7W"…3œÍë¸óß©ñàFÀY·™™YÞÕÁ´ô²áïÔ-«>AƒÉN7K7H¢t>ÑÅ o*¨íÝÝX e\:¡|~¿þ;³ðn´L0 ÁŽõ:.'„wcóêï+?6^uõõIÙCKÁÖ£;÷'r¯Q8äÅH>jô†.ë¼çý£ò# ¤šCëyM|û/ÂxÓ¹¡{ÿ¯/ZÔíX ÜMÓïŠe1µ>¶,š]Nˆ?ÞƒËˇ¸Ù¼ã-—z|ÎÌþÛ¨Ž:½¨ˆ4Ö»ª)hœû yÇjÞ³J*äeWKÐ7; Ñé MÙÏY¡ð‚í [ì˜ËLæœÜ.§Cpœw†’=Æ8—.çéŸATôÅ8$¯^Ò½JkõØU‚€à¨k$`å°·Žé9ØÛ3óK,Ô#í¶W 9?ð)p2‰Rúáýû!yÊ ºn8lBpŸŒE},¸j›Õà àv ž´;èþz8N\ÜœüÃðˆ4ó?¤ï€°Xßœ®]Ê-ŽF¼!°š.”EæR¾8˜Âý5Úðã8W,é,ø¦ó'w_ü—¸ qÜ Áü®»½œÖ8ÆŸna"—cÜ뛿á×~—{~ÅÒ\Î=¦µ £JŒ] 3r_ÿž„`ÌõJ!·¤ªc" {Ÿ0ùçáŠ?h”ªdY$"¿i¥'5Ãö¨a eþ|;(Þ¥šÃ7¼Ô`éü¨a^$H÷Ÿö4 ‚Ï¿£}!Z›»žæ™þãÇ 9Q§ÿÎÎ=Æy— ævôŽM|wÆ“ùv2¸6-`à®B¹FûHu´Úzkuãv€ßaÕ²ÆÕÈÙSÊ9Ç»¯úZKÊA»½·ìXxã.a-qXf¬ŒFøôߣ.Ü„›À¡$êäãÚÎ Ï5NÛ±LêEs`wÍíÍW—¢OsÄ‚@㎛½¡ÞÃ+F†Ù¼Â\“d®äQüÉ7‹ÏÙ¨ÅãÓþd?Üþñ€wÝ­û½æ¤1Ô·Z¶¹Ö„®dt ÿd‡Õë9)j >×.õºvqâ×t.u¼Ç[m¾jµY 8xZä2 ¼ø» ¼­³GÇ&¡›>SˆO] ˆ‡Á¾åHùŒ»Ò§R;U7ª:’ÄÍøH,k¬¡Å˜<ÖðCê^GÐÛD~b¾Ùt˜i\Kj72znxz!…º”‘5KeƒÍXøyü±yG””åp{ÝŸÓ͈Ïùzǽsš2Ǻ’áxËq¼4ºå­ÐXó‹xËU.ij6ƒO>x§Æx‚„±†qFÉ¡™nM&Ó©Üü‘ÛΈۨ-SíµºÈEÄàø·¹Ò–æ5N­WÝpˆm>$ƒÓËÐìë›$îÀ"}ù8¢Ð=eÁƒ”éÑ!lº×¢(Þž'‡¥Œ ­Q :¼]9† ¸ã—âûÿ2¤TPsÖ½G%6æk´ËÔZ@„R„j‚D –£ûÙˆk¨²#áݱùó'Æ<_ •mXþCðÉOPÓfR´@¹!ç\¶|Õó~Ï»úx¸#mI]S,gé.=­„) ¢BEú«ªŸûö–!ÞEº©r]jöëq$ŠáNI¸&ŸÕÕþ(Ÿ‹€ŸEk2!î›ÁBºY. 
/Êl ü*챂ŒËú Gœ{ìÉ÷?••hLpïõjŒ¬‡z‰ÜLkvÁ8º‹=ÏO÷œÐIA?[=h]6‰ãÁS‹ã¢){cÞ’==¯—ÐíwÝ FÈ]|æÐxØ"ŸŸvì#ÀÖ^1e£PÜM¯_ U¹Ùì¹z¸Mï)§øY!öŒC­Ãœ‡sM;ù*Ú$þvÇmÁ+‰µû]• ¦U¥‡êÀ#w¶3_Mq™õÉ<¾ØÜ—·QJf–ݾw^Í”ž/=‘Jx5ª³%45%š«/ZkÁII½&U®NàÙ>?'zv¶úQP Ý?ÜœÏ6<÷ÆÞUKjâøFR^¯¡XÏß¼jñ=#"l^®œ·ø^XTñ7èZª– /óI ÁYŠ™!“±˜ᑺª#Ò…ò ºl½:Ùéf#Í¥é«Á9I|yñÆÎ ÄR]7ë­";¡óß¶‰‰ÛÔÓŽ›Ô–ç$>Låê<¢ðÉGÍ‘bÌJÌE´:¸ªŸŠõR€d°P¹êÕÿûù#³ÿ;Oí.ðÃ.'Ôîê‹É ƒBYÞëúÛåÅžMt(Fô×UÁ¶9FžµÖúÝd¢Ü~óL@‘ûó¹qc¥–ÃÊR2—Ç\¿½¬<щ¤×©*ÛËmIUÛ9Í)_¹ºÐc8Žý˜€l!ܰ†Ý„´kßÛûõn&—õwç €Ó{ÌòÜM1ÞWömÚ´’øö“TN"áNydÊ£eþYJß÷•÷¾‚MùÍÖ{]WæI)Š‹ílÀ¯ƒsê(ÀÑÄ€Ÿ­K½gT{^Á@È£ýªz-¶Æ“ãt\8ÍJ¯xMMõÆ ôáW5ãOÔi¬^tÙÒ¼ÝR†ª¾CLt–ª<ý–¿Ò£YÉŽñ"Ê`ïÞp6#K>‡¾N›´8ßw´òoÙçPiºîˆ±Pè'¬ üîI–NÀÚ%Ï×Hñ–þáxãŽX T æE¿š¨ðŒôg ›êÿqipš:«àJÑA]¦#É0!¾ŠýTÿ«²Ö5jÙg8‘]ïQÆf2úËžy6úmõä+ÝùÎÈ!V¤î>‹y 'þ"/åc×î–l¹Ëc¶qta\4oý™*йí5Ë9\Ô?Mg #ƒ a“{ö 9dJéç}Î~Øo½ÑèΙÂÒô)N›;` ô9,4M±`蟪óÇ ð%ÉÆéË™nÏPVÅ/ÃûÝ‚ˆLLOn„ÿ­´œ!u{ùOsKuÕíòõH=’i4äFÎJëbE3.-.W¦¶Ó&§VbÔÁ¤­¸«"¼ä÷ Á²AF)éâyñ]µÈ3 ò‰UÏV„ ýbÖ—ð=È0HÂöø4üÿ³¢ 1­Þçüó’3}Q«úQøø|ôMö þtàé×ýKòÚÞ |?N+ôTž¶–ûhÄ(!ÐuðuŽCƹǟÁö £¼G–c¬)íþþêÞ‹–"ÇÎ`q‚'áSÑÞ’ù8i‘Z\nºý}~碣`§KKÊÚîÛm}!1×E³u¼âyµšžÎ› 8ðnÔ<áŽv ¦‘Œ÷i5%|>¢2­FýÀHÕ‹M¥Þ2Ü(~”át3x©ýÛ»úúÔ)TþßW}¶I×™ÅÓ‰~èò85`®Év2Ó¯%>M1=ÍM†ºÄOh_>\Îýp2àXÚ_Öƒ”õ`{WvêÎJz?`ÏQ æAµ]ñ—9À Ãòð`ìûægPˆ#á"LîÆÃ±hSåGŒíSM±ð2,pàÃåýÐîÌFp>Ñ?å‘I] JIé|âükKÜnŠWuc¼5{”V©×‰W=À„RìÆŒEB{z€ó~Å4¼rAŽáv¿þâeT›Ö˜¹6Ðʋ´Ð~¼ÚõîËe,¶rO¢yõ0‘°«_;‘-Ò ç¥š¼T”<Uuɯ+埘¹o> _tŸMöl~%];,šÚN†» ÖªÝùÐö‰@.*¾íf6 Ó1r5wùãž_Âjž¸ùÿÑhoê1>—ÚC 1X%+¼°é<§\º+Rx11Üḹ̦([¼\²Lœ?ß×#O³q3Ù¸Ðþ䛡§µèG’Va´NÔÃÄ?xàÕ/*„LEé°’†~Ô‰‡"_Jwß>ÈÂÜy©vf¿UÖ™Râ:­“Ĩe—øèæcNÈg)”yäýÍ04?ºˆÁ‹«‚לʨ7KS ·_Ìeµ¨ùE{õ0ÍùÏø¼”ÝMàõý,ˆ;¸¥^tö¾ÐÛ8øþêÎõ(Ïô´ /É®)®…ŠðÚ¸v.óØáäô™ÿ^~m®_Œï”DLyV#„òÄê,?d§“–y2—©æèA}á9½µj~YÎáæ‘Â7Óg‹5F&—‹Ç;"Fe§êpl÷ã” „Ê&Ç6¸–¥¶½/X`~ì} ŠÞžÙz„Ü(R ùZÖ—êmIÿ0½ùè·Æµ=áÅfT´~ó£Rå™Tæ¹ýÇ{ÿÚT;ýMF7'²t$쀆ë¾ÛØUÓó”4ÉØ™Ç%*IC¸ÝfäÂå=óiµß^v”‚të¡+µH”š¸éþìÜ Çh2<[Ÿ-ð’¼c€•T`"i$QzO‹JDgÙ1e@ê%Ž%g'zp½šIvk6ÕB’k»A7ø•;oÇß,Ä@çEL9ïžù@Å{Yææ×|9ÎpôE{~‰úH½:å×WýE®¤¨#eº$û‹ž¹Zñ˜*EfRÄ´‰« i=iT8 ÔÃâ¨C[WÎo›Pù{œ›kãûÿ-,#Òö¹¢>¥$¯R}2‡V,1M7MÎ}ÿ“ð·‘fídàßf¶ÉƇn9Ÿº;î)màÇÅà11°‹ªÍÆDš_tL’žð~e£5íæv;±ÊY«pã]‡¥QœÞÂ|³ûUE) 
¥éÔÇùWå6U“ƒÙ–Õàpÿ|h­(I®ŠÙÑGPþÃ<ßü_‹kÛïƒIë‘YÁQ7Ë¡¸>„ÉÊeäÝâšHÂ[ù_‹b$º3JGûy8}Hà “ësñŸOϨ34%'œŸ¹n[ÿS.Ð\ R@/uN®Ë‹…e.,ìçúœP)>(#{'ÇõçN'uk;ìÀquÓÖrãö|h@|ð¸ÚÙKˆ)ð#°'¡{<~~Ú"ßÒù¨š ˆÒ3Û¾r_ \¤Çùµ±wöœh?½ÃáoZ»ˆŽjFKP(p”’³±˜6"t âí“˽®*&½ö¦K` o±G€¹—GðoŒ–3}=GéWÇ7“2<8ó¡µãÆ–($¹.ûú6Zràsù«;@ø°MiÓË9D8n ïLg3ŒÃ©RœwJÚý†Í5DPLÞöÂM¿²ÿ#÷Yýj{àØá‡¢&¸û#ÓÙhPrnÔ:Ãôc§¾CÿÍ äNgG…$@Äòýˆ¤Ö¯šîuxCYÞœZÝ¥(i²öØåå™Þ΋u/ûärCO.ŽÄÿ!L=Qí;|CûP {¦kq-ÂÖnÐJøÐ„ƒÉáZg6ƒ´á¹ó: jÒﲉ;í{¶k“^ïƒé°!ß’½¨8'Oè¡Èª $åï-Š£ %–h­ûé*R¾º 2¸Èª±~‹ U?”ªhCý4Ê9?íž9¾üOöW²MŠŠþ;±…“r÷‘³«o)ˇڡHÍ«A7kÞÔR_T„Áæ¯×zo¼_ÌÆ=‚°¼&Šª¤·ÿÏz¼þ;®Q.¦—[ÅçÚò¦Ó~uÉÛ6?È›5‘ Œ3+´äÀ²w•Íï¾³ÚH{ñb.ã§ÿS>3¬w>¬¿ùËÂ!»’7!•[–WÝ¿åD‰ãÐÜ`)¿ƒogŸ\ ©$žÞI‡JKOÙ5\…í½B»Ó}u'$|ms Àþ¾ò¯­‰qÖÚÇj ï´„, mÀý4Ûù…˜ÎгYäWО‹=vÐàýñ‹?—ÎoÎyß°j;[`ƒÿ›W:͵bUŒ”׺¡cÆ„U.ZÐÜ’§=Ü•|ªWúôò¶àŸ_#‰ùÂ&öúõkÅÑc5;;·,+¿âOÒÉ×1mˆ”ðSeÆ?G(èdXy'ѧÚòÞ‹orO‰°ãJþJÄ^CWÑ çÞ´Ÿu]&@DCTQ]€—(!nÿì¨ÈÐ>eçoüŽl'÷ºØ)[ÜÆ™ /RçM*ÂÿEMŒ7=Ó:.C@äµÓZrQèB,°…K.7kwû5_s¢uœÓ˜,>X”šLÿjÞ‹üܽ~Æ=Fȳ¾8±Ý0Òî¾mÊšEì´,[â»)yÕäÏ‰ŠƒaRa«¿æ¿Ù%@üÜ6D·[ïòã æùácOßBÊóZ9¢GbêEŽO}‘÷¯`F¯O!lI„³ïW:ÞÐÍiF“£“÷Vоó«äÆÙ–˜ž2ÅáKR»ÔnçdU( %›OVÈ;B8r‡J¢ç ½Éô/¨>ŤöѨA•~ñªQú:Ù.î¹Sæ±2fÍìJW°adzä´ÛþH(·Þÿº-¶™/7Õƒ¢U~™±'¿]h½‰ ¸ÚÙf'ùÞœ²LîÛN`@9èñýÒéÀ &¡Ñ-öry6›¨¡4—¯òä í´…Z«üõÇ2ZÚÇu:í)Çð0ó@·Å '‹ œÐmy‹W‡ª^Ë<4<,^^šºj ó¯ÙÛŽã¶À‘y á}5åÞÕ4.žÉ·[Ëç¶÷/ªï‹yøè=yþ*‹xÏÖ?ÃÀÿHêŒ7VôK¥¢¢˜Ê²‡!ìçßlê`=©Í~r¢Ê¢$Í÷üÏ›DÙ!˜´Ï@0*.I/½Ç'ºá|°qžo]Ç6v×Öœyßo¯)”ì¿æä^&…PÓbéŠfG0gþ)WÖ,öQi•´%ß]­Ì554{Ùu̇ð fß«?$&Z%0 CSŸ'»TíðhËW¿kw&$œZjîƒ==|;QÜÝFÿ3j„Ã'ap®½ÜYå_TÔjº¦ ´D3>}±²åU1C›cÈ’£õq/Z¯Ežü•àRµö;¬•Ì­ö%ÙØ~I+¹õßY/|·,3Wýñ¤D¤þ÷Xž˜Læ¹ÍÄÏCûólª€Æãƒ"oKKI~7€ÊW¡w™\¸¤|cž„÷sÃ`§~^Tå¬<ÿ#fÜT²:[ïKYNAê5øž;çûÌíÕ‡‡X­ÃGËXÆšàÔð¬s&‡j›<…ã¯`hOä{¡]ÆjiÑ¢œ¡9[í&Ñ ‘Yéªç¤¨;t¸¶(q9~íu’I©Ï?NŸc]·¶XíGìÖŽt¼ëí2hßrêÔ˜Ô|‚^«ŠyŠz\^r(•ì«YÅ–­ºüt:j}ÆÅçþ1gX…´ôÒ³ûy¤š+EÇx“@}ùN>' d«¾›¡è•Y½nr0u|%Õ”´îøŠrÇm»müÓê—í`áú½ÅñªSºíw|zAöyשD(;{eÞ®mcÃN]3‹Íˆ'Éçãâ/>)rŸ¦ìz(cœú‚E¿ÉçËÀ›Áøk)'Ó.ï‚Äæòªc½^‚»?äTäÀ¥“œ4ÉçèkÀ=ø‚4ôÂæ…ŠXðYOÆÆ¿ÛL`¨ø'Ûd”#ÖóO¿ŸªuP‰rà½å k¦®³vß[Ü䩌Wt—¬PáÖTØ3á (‰1U„æPíÙ£nÿQ*eñ>þÆ+{éY_7’ù¬ê)o!ˆD±€8ªñÏЄ…ÿï~b”Sû¤¼XB YrXÑŠÅ2^>.ͱú;Ž‚_¤¾»v?Û³¡ 70¢…]÷£¹Šy#>°)W^<—¤¤e{W‘ÆyiìÍúC›oìI»}3– 
f÷ؘËIú¤ùýuw–0ji+Fc|Y€…c)m·ƒÀÒùsq«7„ª³X¼=¶"q뇉wµé‹B÷…× Lõ W-†eï øØ>÷t9M@%9!|& 6öOŸÐ¨Ï{¤Åÿi‚ËuXÛyú}3*¬Üuü;oÌ—jæW³›:€‘ê~§¥cBçüWE²ž0ï¹ÎoC°é åê·ÏÖ l[/†ùJ¶´Íè£ÉØ ËNo=Ÿdu ›ÒŒ‡öcë—‰ÁþU›ÏJÄñ•“ÓÓ&˜/©Å£ý¥k£<íS’§þ{'2½ýô•Õs&šcç„ ÒEÇÃCBXP:’L¹˜Úc]´Ù&TpÂ@Ý€)[£R3³<Âæú.qzÌUZŽÌ÷Gâ”l&”.0B£­Ô°O‹[sÞ¼~AuQõïn•.Ýè@løBHÒØJÐÉ 9¡œÝTO=ò|a??§òLO“…pãp½ÎZ%u„ù{<Æõ»‡T%UA¨«§æ©àv…¥Ø½€ËÃí¯—€Æ½/ZvWml¦C 7îĶü |q_ÓZÆþ¬¸ÃbV>î3wLÇ1ÎoGS9±;wÚ=ñ†À‰âÕ‹-U§®y¸d*ZÓô—¤Ê yŽ~S”÷ãYC>¿±MÂyÜÂ,Î]zŒ8éðøÂ¹Sô¹‚QâIqâxqÀ_š#Åž'S‹cãóuÁ%ŽQûâì¾Â œºh9¾Iðûö­AVuM0=8¨þƒ3ó­pßÈ ‡éRÊ]{ê.Á z¨\^‰–5íï¤vð‹ÇMÕ ¾åp‰×²îf$½P¿P2A¿ûÂm?aFšÓ1êoÍX!½ñFL7M•dL.\²òZŒåóBèA¦Óe“cÍSçÿ ]±øîÔfœÄ#>U¸™Ò{ŒðS± V*»50S“*×R¿ ¡ŽG,>¹óïšhÀšY4¢téUáÇu#ÔœéÈú!yyÎQY)iÓlf’OhSXw噣Ë*t;EE5ÌUVŠ=™ / µ8žß&øãJ£/é1“í.g#À3ÏŸ‰}o=FX—Sx=r…®öòFÐz†×lgŽð|¡K› žUÖ¹¬Ñ¨©ØGq¡"R=K™ä#,ÛcÓg”:Œ iyÔ±)êñ@¯©AE–K9Â/ivô7·³|ŸŸ£†ß?tãaA´—¼ªÝSÞ¾ä°EaÀr¦Ó|\ž÷-PòÚ„ž „gú7Þ 5ïrNƒ.&2ïýô…Í«Ÿ¤\Óõçù’ýo»šév¯&MrB–(óiž›_±§¯òâ-ÃTäFsæê~Ãs6}¯èMïçõÍMÇÕÿN_ײ‰¦'šßÏb‚@‹ó«’±•GðΛAì¤C³¦Îòªœ:án¼ c5V–éÀc(ØŒ´8mÎNøŠØ'ËåÇw~¶Š²X£”p==b;³¼¸*~ƒÝ~ºÄÅ5äcñ|ðí¿(h,•B|'à'i~¼’²T‡7(ÖmSDQìG¨/ý}çùJÎ73}¹'ÛB ÁÕ,Ó‹7ó{¶•Áà›õQ®ÞipO@&£ÊÜkjK]Ü4Š|¶Ý¬·ìXy\JL>BG_=•Õ¢Õ„öNq$s[¬“Tsè{ç —1ŽØ¤Èe\Æwq;âùÉŒ·ÛmŸéó€_•ù¿ÐH’ÞÙ¤±FF²KE¬~Þ·¡²Iœ'ÄשG“Ñ鮚À–‹I¨oæÚ93à6ÛTdãxá«“Z¤«ïí:—Pw ¡˜ß8ÔÍøNvœn>Ù_€+ª,zw<53Ç»¯EêƒÔL¢áÚòM,ÈŠWxò×F!&Ú­ÿQ¸¯Í5Ü«¡¸Ûm yÊv¿Xs†¡ZË®âQ^Q²p`L?ù’HbƒZ­d-ìÚý:ÿY7$ç|‡#‰‚X<}Ï;R)ZsÌOæún¢ 7õ-ؼùèÿ™×§KGûJ6<ë\ô-ëúq—¯Ö›Ò.í¹cõåuuø¯§³•—_þ™\±cß•/6Žuç¿O–»ßîP[\ò1øÛêóæO?üî0g¶„W¿ Qÿík®§i½óbmÌÝÐÙRy÷þýœ°ìõ–ÿ¥oó÷¯5Kp¾´šeÏ+çEÿ'Ÿõ.Iž´ãéç›o’_–Ö©I¯—ß±kò'– ÷V2ý•²ŠKðúo¶6~ëí×_wdþùn|GñîR¾k£ü˜‹W׳ÄÊ~Ë|³MJ%FüJïý6¯§¯6ÞqùZ|!ùâæuÓ‚{}«ö)*¦¾=Rìå}=qûî:©)™Íkök^Þ|Þ.›}nÉÏgɧç èášõøFÀ]OÉæ7¤ÊýÏ?ýµG¥øÛyÕ%µ¥zQ¬¼gÖšeõ®w_çx¨ô ßõº3½N¯?]réoUxõ|Áq¦ðÓ;Ërö½4üS–´_îúŽ*›œweWLaœ¼øVéÅ ñë›;¬“^0§Ý2£/Ù¹4ÝÆzË«Ûé¯?OÒ®“Ýê¸øGðîÆWåþ^pæ\»v×¹ÞïÏnHn`4ï›ûËrÇ,ƒyÓ˜Ë)Øg~í/Øþ[\!Töìê¼:ã—“%ͶN=έu/ànç^ûy²·b#®~­4z¹â#sþË®º½R"åz÷”¦Z­ò“Ö»(³=¥ðΓºÌ—×Ôí®ø·Ž_ËÙª$}QÛÎs3»36-x­è´„›¢³/>ügÜÐð±ü¬Ô= ðtõsYç”ÐPK¶CR°rC DPK ŸO7'Configurations2/accelerator/current.xmlPKPK ŸO7Configurations2/progressbar/PK ŸO7Configurations2/floater/PK 
ŸO7Configurations2/popupmenu/PK ŸO7Configurations2/menubar/PK ŸO7Configurations2/toolbar/PK ŸO7Configurations2/images/Bitmaps/PK ŸO7Configurations2/statusbar/PK ŸO7 settings.xmlÍZÛrâ8}߯Hñº•Á@ÈT”¹îc›l p"$¯$cÈ×oÛ@v’ÀLÖàªÍC(,©»uÔ—Ó2w?6+zµ&BzœÝgrß´Ìa.Ç[Üg¬±qý=ó£òÇŸÏ=—”1wƒaêZ¥`м‚åL–wÃ÷™@°2GÒ“e†VD–•[æ>a‡eåŸg—ce»'ê±—ûÌR)¿œÍ†aø-,|ãb‘Í•J¥l:Ü’ ~‘\¨\ÅÅq±¹Ü_ £Yš„Wlx  <ü9:O ïþø¨/_¯H»@ êÛ)MHj€|" ÁW&QÁG'N–ÐÞkÑñÚþ|¶LÈghˆ÷ö¥’ñcù#1ò1^Ìþ1‚ˆ¼ˆñ§ë‰Á…ãaLGÑ:»ºt Ì€û‰úBX2Lj<ˆôŸj™ bóé^jö÷ë«dá±°‹Ä ÿrý¹Ùä·Ç Üò1RÇøË!™žáÆ5†ršVq4M!InoªCà1•ܰõg6×Óˆ½É¹ÛÜ5Kš³Í™ÝW=Ôÿ‡ÃÜdh½úcÜ¢O»Êj.׸ù]YÏݰû~ª­ë]½6ß«º¾4á³áŸ¹*y£¦¡=™ú¦Æª[dµÙô¡dFE§5yíLªÜ)ŒÖÉÃk·®·Â£?ƒ9OÓ‘ïäoJV³´Å­Ç%šÛOùRˆìÉë`Üh;¶±åi0kãiOsìI€ëZØ©ë²[ ÃÚª·v`í¬9 qsQê>Ûn«'gÓu`ÞOãyd÷è`¬·-£ÚæKnNnb›‡þðÉÆÔÊÓÛÙØ¯öô*àQ­;ùÍÚ¥ÕíÌîùnž®g¿6ô ÜÖD³`×»¦Ù·íÖ°mH4û/“g'_\;Óp‡ö(ÞÃ|ÂÝé²\6¡ºÑÓÜF¯aÚÅ`6}4Ÿì‘gqkåsK§ÆkaLLZ4„5;ŸÒz=Û’€üý}Bò±`\ÈüRg¸J{‘¢£üPCÔ h\ÑÓ*8•¯¶)UÌ·úRã+_uÞ§9aBê½CÏð„Td™z`½`å8^¸]æŒØÒ˜¬|8ŸËt€GNÇ„ PòÈ4P²ÆN‡!Ú!ÿ«ä.:à¦\?æ1¿©šFiÐ}Ÿn-ID)tyñû…à :på6ùHLÞU /ЭÉdw#Õg5ÊeŽE¡ÒÅwn{y`¢]4)w­ïoë"z‘Å×ã*Ή§¹KÂ\Òá 8£ŸŽö'A=_—oYQg.XK°-`ª0è6nÒÀÍðÅiº@-Î= ¼(¢O“ÂMÎd2“.¨æª ­*¨SoÁ -™Šû.½_©IPœ÷FÖïå‹X-Hw¹>ŠqÅÓ´)Pí êXXŒ5«Úy¤Ö©d‘¸þ^_O¦åCqwÚ`9†¡8˰ÕPòà±à½Ñ xÌ)wÔVWu©¨÷,Ϋ»dLé·BÞ$›HrL|¦ñä› nã^nb£ ÂpÝ``p‰Kj‹³F<Œw$ÿk‘áèuØz›ì†@›(ù8¬<õsÃgá½9êtŸÂáŸÆg J¡Áqê‚T)„ûÎÉí®,\0Æ dtÆc®ôÎ'?> )~"gû0ìœÉ×zŒòU³Ÿé|9²°#§{åÏM—³JºR—ŽÿŘ÷aFæ±çžžæ…üÕr—ŸPKÝrèT­PK ŸO7^Æ2 ''mimetypePK ŸO7P݃«õ® Mcontent.xmlPK ŸO7XsØî 1layout-cachePK ŸO7)Ãø© l/ }styles.xmlPK ŸO7AøÄ#îº^!meta.xmlPK ŸO7¶CR°rC D‚#Thumbnails/thumbnail.pngPK ŸO7':gConfigurations2/accelerator/current.xmlPK ŸO7‘gConfigurations2/progressbar/PK ŸO7ËgConfigurations2/floater/PK ŸO7hConfigurations2/popupmenu/PK ŸO79hConfigurations2/menubar/PK ŸO7ohConfigurations2/toolbar/PK ŸO7¥hConfigurations2/images/Bitmaps/PK ŸO7âhConfigurations2/statusbar/PK ŸO7Õû„ù! 
isettings.xmlPK ŸO7ÝrèT­ØoMETA-INF/manifest.xmlPK(oqgalera-4-26.4.25/gcs/doc/Doxyfile000644 000164 177776 00000143661 15107057155 017556 0ustar00jenkinsnogroup000000 000000 # Doxyfile 1.4.6 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = GCS # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 0.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. 
Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. 
# If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. 
This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to # include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. 
Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. 
This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. 
If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. 
# The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. 
WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../src # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py FILE_PATTERNS = *.c *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. 
INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. 
ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. 
Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. 
DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. 
COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. 
LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = YES # The MAN_OUTPUT tag is used to specify where the man pages will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. 
XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. 
PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. 
PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. 
ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. 
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO galera-4-26.4.25/gcs/README000644 000164 177776 00000001151 15107057155 016146 0ustar00jenkinsnogroup000000 000000 Welcome to libgcs - Group Communication system abstraction library. libgcs is intended to simplify the use of Group Communication semantics in applications which are not initially targeted for it. 
This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY, to the extent permitted by law; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. libgcs is free software. Please see the file COPYING for details. For documentation, please see the files in the doc subdirectory. For building and installation instructions please see the INSTALL file. galera-4-26.4.25/gcs/SConscript000644 000164 177776 00000000035 15107057155 017300 0ustar00jenkinsnogroup000000 000000 SConscript('src/SConscript') galera-4-26.4.25/gcs/ChangeLog000644 000164 177776 00000005530 15107057155 017045 0ustar00jenkinsnogroup000000 000000 2010-07-18 Alex Substituted gcs_slave_queue_len() with gcs_get_stats() to return a wider range of gcs performance statistics. At this time it includes average slave queue length, average send queue length, fraction of time spent paused and number of flow control events sent and received. 2010-06-16 Alex Added gcs_interrupt() call to be able to interrupt scheduled threads. Version 0.13.1 2010-05-31 Alex Added flow control monitor and ability to synchronize with gcs_send() and gcs_repl() calls thus guaranteeing FIFO order. Version 0.13.0 2010-05-20 Alex Added gcs_slave_queue_len() query to API. 2009-11-21 Alex Extended state message to contain previous primary configuraiton info. Many bugfixes and cleanups. Version 0.12.0 2009-08-09 Alex Added possibility to specify desired donor. Version 0.11.0 2009-08-06 Alex Refactored interface. Now connection URL is supplied to gcs_open() and not gcs_create(). It is done to satisfy wsrep API changes and is generally cleaner as it separates library initialisation from connection establishment. Version: 0.10.0 2009-07-21 Alex Added node name and incoming address arguments to gcs_create(). Thus it should be possible to give nodes sensible names and see them in logs. Version: 0.9.0 2009-06-21 Alex Moved TO module out of the library. 
Since it no longer offers this interface, bumped minor version: 0.8.0 2008-11-16 Alex Many bugfixes. Fixed handling of self-leave meassages. Switched to "mallocless" FIFO implementaiton in gu_fifo.c Resolved apparent race condition and optimized FC message sending. Package version 0.7.2 2008-11-09 Alex Changed state transfer protocol to require join message to be sent by both parties involved in state transfer. Package version 0.7.1, library interface 9.0.0. 2008-10-21 Alex First implementation of state transfer request protocol. Bumped package version to 0.7.0, library interface to 8.0.0. 2008-09-29 Alex (postfactum) State exchange (GCS state exchange, not application state exchange) implemented. Now we have some sort of quourum calculations and global-scope sequence numbers. New nodes can join without having to restart the whole group. Bumped package version to 0.6.0. 2008-08-01 Alex (postfactum) START/STOP-based flow control. A little bit ahead of the plan. 2008-07-30 Alex Added gcs_join() and gcs_wait() getting closer to final API. gcs_join() moves connection to JOINED state. gcs_wait() blocks waiting for the group memebers to catch up. 2008-05-14 Alex Added gcs_create() and gcs_destroy() for safe and clean initialization and deinitialization of GCS connection handle. 2008-03-23 Alex Added gcs_set_last_applied() and gcs_get_last_applied() - calls for voting for the last applied action. 
galera-4-26.4.25/garb/000755 000164 177776 00000000000 15107057160 015423 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/garb/garb_gcs.cpp000644 000164 177776 00000007400 15107057155 017703 0ustar00jenkinsnogroup000000 000000 /* * Copyright (C) 2011-2021 Codership Oy */ #include "garb_gcs.hpp" namespace garb { static int const REPL_PROTO_VER(127); static int const APPL_PROTO_VER(127); Gcs::Gcs (gu::Config& gconf, const std::string& name, const std::string& address, const std::string& group) : closed_ (true), gcs_ (gcs_create (gconf, NULL, NULL, name.c_str(), "", REPL_PROTO_VER, APPL_PROTO_VER)) { if (!gcs_) { gu_throw_fatal << "Failed to create GCS object"; } ssize_t ret = gcs_open (gcs_, group.c_str(), address.c_str(), false); if (ret < 0) { gcs_destroy (gcs_); gu_throw_error(-ret) << "Failed to open connection to group"; } closed_ = false; } Gcs::~Gcs () { if (!closed_) { log_warn << "Destroying non-closed object, bad idea"; close (); } gcs_destroy (gcs_); } void Gcs::recv (gcs_action& act) { again: ssize_t ret = gcs_recv(gcs_, &act); if (gu_unlikely(ret < 0)) { if (-ECANCELED == ret) { ret = gcs_resume_recv (gcs_); if (0 == ret) goto again; } log_fatal << "Receiving from group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Receiving from group failed"; } } void Gcs::request_state_transfer (const std::string& request, const std::string& donor) { gcs_seqno_t order; log_info << "Sending state transfer request: '" << request << "', size: " << request.length(); /* Need to substitute the first ':' for \0 */ ssize_t req_len = request.length() + 1 /* \0 */; char* const req_str(reinterpret_cast(::malloc( req_len + 1 /* potentially need one more \0 */))); // cppcheck-suppress nullPointer if (!req_str) { gu_throw_error (ENOMEM) << "Cannot allocate " << req_len << " bytes for state transfer request"; } ::strcpy(req_str, request.c_str()); char* column_ptr = ::strchr(req_str, ':'); if (column_ptr) { *column_ptr = '\0'; } else /* append an 
empty string */ { req_str[req_len] = '\0'; req_len++; } ssize_t ret; do { gu_uuid_t ist_uuid = {{0, }}; gcs_seqno_t ist_seqno = GCS_SEQNO_ILL; // for garb we use the lowest str_version. ret = gcs_request_state_transfer (gcs_, 0, req_str, req_len, donor.c_str(), gu::GTID(ist_uuid, ist_seqno), order); } while (-EAGAIN == ret && (usleep(1000000), true)); free (req_str); if (ret < 0) { log_fatal << "State transfer request failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "State transfer request failed"; } } void Gcs::join (const gu::GTID& gtid, int const code) { ssize_t const ret(gcs_join (gcs_, gtid, code)); if (ret < 0) { log_fatal << "Joining group failed: " << ret << " (" << strerror(-ret) << ")"; gu_throw_error(-ret) << "Joining group failed"; } } void Gcs::set_last_applied (const gu::GTID& gtid) { (void) gcs_set_last_applied(gcs_, gtid); } void Gcs::close () { if (!closed_) { ssize_t ret = gcs_close (gcs_); if (ret < 0) { log_error << "Failed to close connection to group"; } else { closed_ = true; } } else { log_warn << "Attempt to close a closed connection"; assert(0); } } } /* namespace garb */ galera-4-26.4.25/garb/CMakeLists.txt000644 000164 177776 00000002045 15107057155 020170 0ustar00jenkinsnogroup000000 000000 # # Copyright (C) 2025 Codership Oy # # -D_GLIBCXX_DEBUG makes linking with Boost program options # library fail, so disable it for garbd. remove_definitions(-D_GLIBCXX_DEBUG) add_executable(garbd garb_config.cpp garb_logger.cpp garb_gcs.cpp garb_recv_loop.cpp garb_main.cpp ) target_include_directories(garbd PRIVATE ${PROJECT_SOURCE_DIR}/wsrep/src ) target_compile_definitions(garbd PRIVATE -DGALERA_VER="${GALERA_VERSION}" -DGALERA_REV="${GALERA_REVISION}" ) # TODO: Fix. 
target_compile_options(garbd PRIVATE -Wno-conversion -Wno-unused-parameter ) target_link_libraries(garbd gcs4garb gcomm gcache ${Boost_PROGRAM_OPTIONS_LIBRARY}) install(TARGETS garbd DESTINATION bin) if (NOT ${CMAKE_SYSTEM_NAME} MATCHES ".*BSD") install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/files/garb.cnf ${CMAKE_CURRENT_SOURCE_DIR}/files/garb.service ${CMAKE_CURRENT_SOURCE_DIR}/files/garb-systemd DESTINATION share) install(FILES ${PROJECT_SOURCE_DIR}/man/garbd.8 DESTINATION man/man8) endif() galera-4-26.4.25/garb/files/000755 000164 177776 00000000000 15107057160 016525 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/garb/files/freebsd/000755 000164 177776 00000000000 15107057160 020137 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/garb/files/freebsd/garb.sh000644 000164 177776 00000006172 15107057155 021420 0ustar00jenkinsnogroup000000 000000 #!/bin/sh # # garb.sh for rc.d usage (c) 2013 Codership Oy # $Id$ # PROVIDE: garb # REQUIRE: LOGIN # KEYWORD: shutdown # # Add the following line to /etc/rc.conf to enable Galera Arbitrator Daemon (garbd): # garb_enable (bool): Set to "NO" by default. # Set it to "YES" to enable Galera Arbitrator Daemon. # garb_galera_nodes (str): A space-separated list of node addresses (address[:port]) in the cluster # (default empty). # garb_galera_group (str): Galera cluster name, should be the same as on the rest of the nodes. # (default empty). # Optional: # garb_galera_options (str): Optional Galera internal options string (e.g. SSL settings) # see http://www.codership.com/wiki/doku.php?id=galera_parameters # (default empty). # garb_log_file (str): Log file for garbd (default empty). Optional, by default logs to syslog # garb_pid_file (str): Custum PID file path and name. # Default to "/var/run/garb.pid". # . 
/etc/rc.subr name="garb" rcvar=garb_enable load_rc_config $name # set defaults : ${garb_enable="NO"} : ${garb_galera_nodes=""} : ${garb_galera_group=""} : ${garb_galera_options=""} : ${garb_log_file=""} : ${garb_pid_file="/var/run/garb.pid"} : ${garb_working_directory=""} procname="/usr/local/bin/garbd" command="/usr/sbin/daemon" command_args="-c -f -u nobody -p $garb_pid_file $procname" start_precmd="${name}_prestart" #start_cmd="${name}_start" start_postcmd="${name}_poststart" stop_precmd="${name}_prestop" #stop_cmd="${name}_stop" #stop_postcmd="${name}_poststop" #extra_commands="reload" #reload_cmd="${name}_reload" export LD_LIBRARY_PATH=/usr/local/lib/gcc44 garb_prestart() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to start $name" [ -r "$garb_pid_file" ] && err 0 "$procname is already running with PID $(cat $garb_pid_file)" [ -x "$procname" ] || err 5 "$procname is not found" # check that node addresses are configured [ -z "$garb_galera_nodes" ] && err 6 "List of garb_galera_nodes is not configured" [ -z "$garb_galera_group" ] && err 6 "garb_galera_group name is not configured" GALERA_PORT=${GALERA_PORT:-4567} # Concatenate all nodes in the list (for backward compatibility) ADDRESS= for NODE in ${garb_galera_nodes}; do [ -z "$ADDRESS" ] && ADDRESS="$NODE" || ADDRESS="$ADDRESS,$NODE" done command_args="$command_args -a gcomm://$ADDRESS" [ -n "$garb_galera_group" ] && command_args="$command_args -g $garb_galera_group" [ -n "$garb_galera_options" ] && command_args="$command_args -o $garb_galera_options" [ -n "$garb_log_file" ] && command_args="$command_args -l $garb_log_file" [ -n "$garb_working_directory" ] && command_args="$command_args -w $garb_working_directory" return 0 } garb_poststart() { local timeout=15 while [ ! 
-f "$garb_pid_file" -a $timeout -gt 0 ]; do timeout=$(( timeout - 1 )) sleep 1 done return 0 } garb_prestop() { [ "$(id -ur)" != "0" ] && err 4 "root rights are required to stop $name" [ -r $garb_pid_file ] || err 0 "" return 0 } run_rc_command "$1" galera-4-26.4.25/garb/files/garb.service000644 000164 177776 00000000736 15107057155 021034 0ustar00jenkinsnogroup000000 000000 # Systemd service file for garbd [Unit] Description=Galera Arbitrator Daemon Documentation=man:garbd(8) Documentation=https://galeracluster.com/library/documentation/arbitrator.html After=network.target [Install] WantedBy=multi-user.target Alias=garbd.service [Service] User=nobody ExecStart=/usr/bin/garb-systemd start # Use SIGINT because with the default SIGTERM # garbd fails to reliably transition to 'destroyed' state KillSignal=SIGINT TimeoutSec=2m PrivateTmp=false galera-4-26.4.25/garb/files/garb.sh000755 000164 177776 00000007331 15107057155 020007 0ustar00jenkinsnogroup000000 000000 #!/bin/bash # # Copyright (C) 2012-2015 Codership Oy # # init.d script for garbd # # chkconfig: - 99 01 # config: /etc/sysconfig/garb | /etc/default/garb ### BEGIN INIT INFO # Provides: garb # Required-Start: $remote_fs $syslog # Required-Stop: $remote_fs $syslog # Should-Start: $network $named $time # Should-Stop: $network $named $time # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: Galera Arbitrator Daemon # Description: The Galera Arbitrator is used as part of clusters # that have only two real Galera servers and need an # extra node to arbitrate split brain situations. ### END INIT INFO # Source function library. if [ -f /etc/redhat-release ]; then . /etc/init.d/functions . /etc/sysconfig/network config=/etc/sysconfig/garb else . 
/lib/lsb/init-functions config=/etc/default/garb fi log_failure() { if [ -f /etc/redhat-release ]; then echo -n $* failure "$*" echo else log_failure_msg "$*" fi } PIDFILE=/var/run/garbd prog=$(which garbd) program_start() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Starting $prog: " daemon --user nobody $prog "$@" >/dev/null rcode=$? if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi [ $rcode -eq 0 ] && echo_success || echo_failure echo else log_daemon_msg "Starting $prog: " start-stop-daemon --start --quiet -c nobody --background \ --exec $prog -- "$@" rcode=$? # Hack: sleep a bit to give garbd some time to fork sleep 1 if [ $rcode -eq 0 ]; then pidof $prog > $PIDFILE || rcode=$? fi log_end_msg $rcode fi return $rcode } program_stop() { local rcode if [ -f /etc/redhat-release ]; then echo -n $"Shutting down $prog: " killproc -p $PIDFILE rcode=$? [ $rcode -eq 0 ] && echo_success || echo_failure else start-stop-daemon --stop --quiet --oknodo --retry TERM/30/KILL/5 \ --pidfile $PIDFILE rcode=$? log_end_msg $rcode fi [ $rcode -eq 0 ] && rm -f $PIDFILE return $rcode } program_status() { if [ -f /etc/redhat-release ]; then status $prog else status_of_proc -p $PIDFILE "$prog" garb fi } start() { [ "$EUID" != "0" ] && return 4 [ "$NETWORKING" = "no" ] && return 1 if grep -q -E '^# REMOVE' $config; then log_failure "Garbd config $config is not configured yet" return 0 fi if [ -r $PIDFILE ]; then local PID=$(cat ${PIDFILE}) if ps -p $PID >/dev/null 2>&1; then log_failure "$prog is already running with PID $PID" return 3 # ESRCH else rm -f $PIDFILE fi fi [ -x $prog ] || return 5 [ -f $config ] && . 
$config # Check that node addresses are configured if [ -z "$GALERA_NODES" ]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [ -z "$GALERA_GROUP" ]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-d -a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "$GALERA_GROUP" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "$GALERA_OPTIONS" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "$LOG_FILE" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" [ -n "$WORK_DIR" ] && OPTIONS="$OPTIONS -w '$WORK_DIR'" eval program_start $OPTIONS } stop() { [ "$EUID" != "0" ] && return 4 [ -r $PIDFILE ] || return 3 # ESRCH program_stop } restart() { stop start } # See how we were called. case "$1" in start) start ;; stop) stop ;; status) program_status ;; restart|reload|force-reload) restart ;; condrestart) if status $prog > /dev/null; then stop start fi ;; *) echo $"Usage: $0 {start|stop|status|restart|reload}" exit 2 esac galera-4-26.4.25/garb/files/garb.cnf000644 000164 177776 00000001060 15107057155 020131 0ustar00jenkinsnogroup000000 000000 # Copyright (C) 2012 Codership Oy # This config file is to be sourced by garb service script. # A comma-separated list of node addresses (address[:port]) in the cluster # GALERA_NODES="" # Galera cluster name, should be the same as on the rest of the nodes. # GALERA_GROUP="" # Optional Galera internal options string (e.g. SSL settings) # see https://galeracluster.com/library/documentation/galera-parameters.html # GALERA_OPTIONS="" # Log file for garbd. Optional, by default logs to syslog # LOG_FILE="" # Where to persist necessary data # WORK_DIR="" galera-4-26.4.25/garb/files/garb-systemd000755 000164 177776 00000002311 15107057155 021055 0ustar00jenkinsnogroup000000 000000 #!/bin/bash -ue # if [[ -f /etc/debian_version ]]; then config=/etc/default/garb else config=/etc/sysconfig/garb fi log_failure() { echo " ERROR! 
$@" } program_start() { echo "Starting garbd" /usr/bin/garbd "$@" } start() { if grep -q -E '^# REMOVE' $config; then log_failure "Garbd config $config is not configured yet" return 0 fi [ -f $config ] && . $config # Check that node addresses are configured if [[ -z "${GALERA_NODES:-}" ]]; then log_failure "List of GALERA_NODES is not configured" return 6 fi if [[ -z "${GALERA_GROUP:-}" ]]; then log_failure "GALERA_GROUP name is not configured" return 6 fi GALERA_PORT=${GALERA_PORT:-4567} OPTIONS="-a gcomm://${GALERA_NODES// /,}" # substitute space with comma for backward compatibility [ -n "${GALERA_GROUP:-}" ] && OPTIONS="$OPTIONS -g '$GALERA_GROUP'" [ -n "${GALERA_OPTIONS:-}" ] && OPTIONS="$OPTIONS -o '$GALERA_OPTIONS'" [ -n "${LOG_FILE:-}" ] && OPTIONS="$OPTIONS -l '$LOG_FILE'" [ -n "${WORK_DIR:-}" ] && OPTIONS="$OPTIONS -w '$WORK_DIR'" eval program_start $OPTIONS } # See how we were called. case "$1" in start) start ;; *) echo $"Usage: $0 {start}" exit 2 ;; esac exit $? galera-4-26.4.25/garb/garb_logger.hpp000644 000164 177776 00000000444 15107057155 020414 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011 Codership Oy */ #ifndef _GARB_LOGGER_HPP_ #define _GARB_LOGGER_HPP_ #include namespace garb { extern void set_logfile (const std::string& fname); extern void set_syslog (); } /* namespace garb */ #endif /* _GARB_LOGGER_HPP_ */ galera-4-26.4.25/garb/garb_main.cpp000644 000164 177776 00000006521 15107057155 020056 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2025 Codership Oy */ #include "garb_config.hpp" #include "garb_recv_loop.hpp" #include #include #include // exit() #include // setsid(), chdir() #include // open() #include // sigaction namespace garb { void become_daemon (const std::string& workdir) { if (chdir("/")) // detach from potentially removable block devices { gu_throw_system_error(errno) << "chdir(" << workdir << ") failed"; } if (!workdir.empty() && chdir(workdir.c_str())) { gu_throw_system_error(errno) << "chdir(" << 
workdir << ") failed"; } if (pid_t pid = fork()) { if (pid > 0) // parent { exit(0); } else { // I guess we want this to go to stderr as well; std::cerr << "Failed to fork daemon process: " << errno << " (" << strerror(errno) << ")"; gu_throw_system_error(errno) << "Failed to fork daemon process"; } } // child if (setsid()<0) // become a new process leader, detach from terminal { gu_throw_system_error(errno) << "setsid() failed"; } // umask(0); // A second fork ensures the process cannot acquire a controlling // terminal. if (pid_t pid = fork()) { if (pid > 0) { exit(0); } else { gu_throw_system_error(errno) << "Second fork failed"; } } // Close the standard streams. This decouples the daemon from the // terminal that started it. close(0); close(1); close(2); // Bind standard fds (0, 1, 2) to /dev/null for (int fd = 0; fd < 3; ++fd) { if (open("/dev/null", O_RDONLY) < 0) { // Avoid leaking file descriptors if (fd == 2) close(1); if (fd > 0) close(0); gu_throw_system_error(errno) << "Unable to open /dev/null for fd " << fd; } } char* wd(static_cast(::malloc(PATH_MAX))); if (wd) { log_info << "Currend WD: " << getcwd(wd, PATH_MAX); ::free(wd); } } int main (int argc, char* argv[]) { Config config(argc, argv); if (config.exit()) return 0; log_info << "Read config: " << config << std::endl; if (config.daemon()) become_daemon(config.workdir()); try { /* Ignore SIGPIPE which could be raised when cluster connections are closed abruptly. */ struct sigaction isa; memset (&isa, 0, sizeof(isa)); isa.sa_handler = SIG_IGN; if (sigaction (SIGPIPE, &isa, NULL)) { gu_throw_system_error(errno) << "Falied to install signal handler for signal " << "SIGPIPE"; } RecvLoop loop (config); return 0; } catch (std::exception& e) { log_fatal << "Exception in creating receive loop: " << e.what(); } catch (...) 
{ log_fatal << "Exception in creating receive loop."; } return EXIT_FAILURE; } } /* namespace garb */ int main (int argc, char* argv[]) { try { return garb::main (argc, argv); } catch (std::exception& e) { log_fatal << e.what(); return 1; } } galera-4-26.4.25/garb/garb_config.hpp000644 000164 177776 00000002624 15107057155 020404 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2013 Codership Oy */ #ifndef _GARB_CONFIG_HPP_ #define _GARB_CONFIG_HPP_ #include #include namespace garb { class Config { public: static std::string const DEFAULT_SST; // default (empty) SST request Config (int argc, char* argv[]); ~Config () {} bool daemon() const { return daemon_ ; } const std::string& name() const { return name_ ; } const std::string& address() const { return address_; } const std::string& group() const { return group_ ; } const std::string& sst() const { return sst_ ; } const std::string& donor() const { return donor_ ; } const std::string& options() const { return options_; } const std::string& cfg() const { return cfg_ ; } const std::string& log() const { return log_ ; } const std::string& workdir() const { return workdir_; } bool exit() const { return exit_ ; } private: bool daemon_; std::string name_; std::string address_; std::string group_; std::string sst_; std::string donor_; std::string options_; std::string log_; std::string cfg_; std::string workdir_; bool exit_; /* Exit on --help or --version */ }; /* class Config */ std::ostream& operator << (std::ostream&, const Config&); } /* namespace garb */ #endif /* _GARB_CONFIG_HPP_ */ galera-4-26.4.25/garb/garb_logger.cpp000644 000164 177776 00000002135 15107057155 020406 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011 Codership Oy */ #include "garb_logger.hpp" #include #include #include #include namespace garb { void set_logfile (const std::string& fname) { FILE* log_file = fopen (fname.c_str(), "a"); if (!log_file) { gu_throw_error (ENOENT) << "Failed to open '" << fname << "' for appending"; } 
gu_conf_set_log_file (log_file); } static void log_to_syslog (int level, const char* msg) { int p = LOG_NOTICE; switch (level) { case GU_LOG_FATAL: p = LOG_CRIT; break; case GU_LOG_ERROR: p = LOG_ERR; break; case GU_LOG_WARN: p = LOG_WARNING; break; case GU_LOG_INFO: p = LOG_INFO; break; case GU_LOG_DEBUG: p = LOG_DEBUG; break; } syslog (p | LOG_DAEMON, "%s", msg); } void set_syslog () { openlog ("garbd", LOG_PID, LOG_DAEMON); gu_conf_set_log_callback (log_to_syslog); } } /* namespace garb */ galera-4-26.4.25/garb/SConscript000644 000164 177776 00000002724 15107057155 017446 0ustar00jenkinsnogroup000000 000000 # Copyright (C) 2011 Codership Oy Import('env', 'libboost_program_options') garb_env = env.Clone() # Include paths garb_env.Append(CPPPATH = Split(''' # #/common #/galerautils/src #/gcs/src ''')) garb_env.Append(CPPFLAGS = ' -DGCS_FOR_GARB') garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils.a')) garb_env.Prepend(LIBS=File('#/galerautils/src/libgalerautils++.a')) garb_env.Prepend(LIBS=File('#/gcomm/src/libgcomm.a')) garb_env.Prepend(LIBS=File('#/gcs/src/libgcs4garb.a')) if libboost_program_options: garb_env.Append(LIBS=libboost_program_options) # special environment for garb_config.cpp conf_env = garb_env.Clone() Import('GALERA_VER', 'GALERA_REV') conf_env.Append(CPPFLAGS = ' -DGALERA_VER=\\"' + GALERA_VER + '\\"') conf_env.Append(CPPFLAGS = ' -DGALERA_REV=\\"' + GALERA_REV + '\\"') garb = garb_env.Program(target = 'garbd', source = Split(''' garb_logger.cpp garb_gcs.cpp garb_recv_loop.cpp garb_main.cpp ''') + conf_env.SharedObject(['garb_config.cpp']) ) galera-4-26.4.25/garb/garb_recv_loop.cpp000644 000164 177776 00000007032 15107057155 021120 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2023 Codership Oy */ #include "garb_recv_loop.hpp" #include namespace garb { static Gcs* global_gcs(0); void signal_handler (int signum) { log_info << "Received signal " << signum; global_gcs->close(); } void RecvLoop::close_connection() { if 
(!closed_) { gcs_.close(); closed_ = true; } } RecvLoop::RecvLoop (const Config& config) : config_(config), gconf_ (), params_(gconf_), parse_ (gconf_, config_.options()), gcs_ (gconf_, config_.name(), config_.address(), config_.group()), uuid_ (GU_UUID_NIL), seqno_ (GCS_SEQNO_ILL), proto_ (0), closed_(false) { /* set up signal handlers */ global_gcs = &gcs_; struct sigaction sa; memset (&sa, 0, sizeof(sa)); sa.sa_handler = signal_handler; if (sigaction (SIGTERM, &sa, NULL)) { gu_throw_system_error(errno) << "Falied to install signal handler for signal " << "SIGTERM"; } if (sigaction (SIGINT, &sa, NULL)) { gu_throw_system_error(errno) << "Failed to install signal handler for signal " << "SIGINT"; } loop(); } /* return true to exit loop */ bool RecvLoop::one_loop() { gcs_action act; gcs_.recv (act); switch (act.type) { case GCS_ACT_WRITESET: seqno_ = act.seqno_g; if (gu_unlikely(proto_ == 0 && !(seqno_ & 127))) { /* report_interval_ of 128 in old protocol */ gcs_.set_last_applied (gu::GTID(uuid_, seqno_)); } break; case GCS_ACT_COMMIT_CUT: break; case GCS_ACT_STATE_REQ: /* we can't donate state */ gcs_.join (gu::GTID(uuid_, seqno_),-ENOSYS); break; case GCS_ACT_CCHANGE: { gcs_act_cchange const cc(act.buf, act.size); if (cc.conf_id > 0) /* PC */ { int const my_idx(act.seqno_g); assert(my_idx >= 0); gcs_node_state const my_state(cc.memb[my_idx].state_); if (GCS_NODE_STATE_PRIM == my_state) { uuid_ = cc.uuid; seqno_ = cc.seqno; gcs_.request_state_transfer (config_.sst(),config_.donor()); gcs_.join(gu::GTID(cc.uuid, cc.seqno), 0); } proto_ = gcs_.proto_ver(); } else { if (cc.memb.size() == 0) // SELF-LEAVE after closing connection { log_info << "Exiting main loop"; return true; } uuid_ = GU_UUID_NIL; seqno_ = GCS_SEQNO_ILL; } if (config_.sst() != Config::DEFAULT_SST) { // we requested custom SST, so we're done here close_connection(); } break; } case GCS_ACT_INCONSISTENCY: // something went terribly wrong, restart needed close_connection(); break; case GCS_ACT_JOIN: 
case GCS_ACT_SYNC: case GCS_ACT_FLOW: case GCS_ACT_VOTE: case GCS_ACT_SERVICE: case GCS_ACT_ERROR: case GCS_ACT_UNKNOWN: break; } if (act.buf) { ::free(const_cast(act.buf)); } return false; } void RecvLoop::loop() { while (true) { try { if (one_loop()) return; } catch(gu::Exception& e) { log_error << e.what(); close_connection(); /* continue looping to clear recv queue */ } } } } /* namespace garb */ galera-4-26.4.25/garb/garb_recv_loop.hpp000644 000164 177776 00000002172 15107057155 021125 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2023 Codership Oy */ #ifndef _GARB_RECV_LOOP_HPP_ #define _GARB_RECV_LOOP_HPP_ #include "garb_gcs.hpp" #include "garb_config.hpp" #include #include #include // COMMON_BASE_DIR_KEY #include namespace garb { class RecvLoop { public: RecvLoop (const Config&); ~RecvLoop () {} private: bool one_loop(); void loop(); void close_connection(); const Config& config_; gu::Config gconf_; struct RegisterParams { RegisterParams(gu::Config& cnf) { gu::ssl_register_params(cnf); gcs_register_params(cnf); cnf.add(COMMON_BASE_DIR_KEY); } } params_; struct ParseOptions { ParseOptions(gu::Config& cnf, const std::string& opt) { cnf.parse(opt); gu::ssl_init_options(cnf); } } parse_; Gcs gcs_; gu::UUID uuid_; gu::seqno_t seqno_; int proto_; bool closed_; }; /* RecvLoop */ } /* namespace garb */ #endif /* _GARB_RECV_LOOP_HPP_ */ galera-4-26.4.25/garb/garb_config.cpp000644 000164 177776 00000012020 15107057155 020366 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2025 Codership Oy */ #include "garb_config.hpp" #include "garb_logger.hpp" #include #include #include #include #include #include namespace po = boost::program_options; #include #include namespace garb { static void strip_quotes(std::string& s) { /* stripping no more than one pair of quotes */ if ('"' == *s.begin() && '"' == *s.rbegin()) { std::string stripped(s.substr(1, s.length() - 2)); s = stripped; } } std::string const Config::DEFAULT_SST(WSREP_STATE_TRANSFER_TRIVIAL); 
Config::Config (int argc, char* argv[]) : daemon_ (false), name_ (GCS_ARBITRATOR_NAME), address_ (), group_ ("my_test_cluster"), sst_ (DEFAULT_SST), donor_ (), options_ (), log_ (), cfg_ (), workdir_ (), exit_ (false) { po::options_description other ("Other options"); other.add_options() ("version,v", "Print version & exit") ("help,h", "Show help message & exit") ; // only these are read from cfg file po::options_description config ("Configuration"); config.add_options() ("daemon,d", "Become daemon") ("name,n", po::value(&name_), "Node name") ("address,a",po::value(&address_), "Group address") ("group,g", po::value(&group_), "Group name") ("sst", po::value(&sst_), "SST request string") ("donor", po::value(&donor_), "SST donor name") ("options,o",po::value(&options_), "GCS/GCOMM option list") ("log,l", po::value(&log_), "Log file") ("workdir,w",po::value(&workdir_), "Daemon working directory") ; po::options_description cfg_opt; cfg_opt.add_options() ("cfg,c", po::value(&cfg_), "Configuration file") ; // these are accepted on the command line po::options_description cmdline_opts; cmdline_opts.add(config).add(cfg_opt).add(other); // we can submit address without option po::positional_options_description p; p.add("address", -1); po::variables_map vm; store(po::command_line_parser(argc, argv). 
options(cmdline_opts).positional(p).run(), vm); notify(vm); if (vm.count("help")) { std::cerr << "\nUsage: " << argv[0] << " [options] [group address]\n" << cmdline_opts << std::endl; exit_= true; return; } if (vm.count("version")) { log_info << GALERA_VER << ".r" << GALERA_REV; exit_= true; return; } if (vm.count("cfg")) { std::ifstream ifs(cfg_.c_str()); if (!ifs.good()) { gu_throw_error(ENOENT) << "Failed to open configuration file '" << cfg_ << "' for reading."; } store(parse_config_file(ifs, config), vm); notify(vm); } if (!vm.count("address")) { gu_throw_error(EDESTADDRREQ) << "Group address not specified"; } if (!vm.count("group")) { gu_throw_error(EDESTADDRREQ) << "Group name not specified"; } if (vm.count("daemon")) { daemon_ = true; } /* Seeing how https://svn.boost.org/trac/boost/ticket/850 is fixed long and * hard, it becomes clear what an undercooked piece of... cake(?) boost is. * - need to strip quotes manually if used in config file. * (which is done in a very simplistic manner, but should work for most) */ strip_quotes(name_); strip_quotes(address_); strip_quotes(group_); strip_quotes(sst_); strip_quotes(donor_); strip_quotes(options_); strip_quotes(log_); strip_quotes(workdir_); strip_quotes(cfg_); if (options_.length() > 0) options_ += "; "; options_ += "gcs.fc_limit=9999999; gcs.fc_factor=1.0;" "gcs.fc_single_primary=yes; gcs.stateless=yes;"; if (!workdir_.empty()) { options_ += " base_dir=" + workdir_ + ";"; } // this block must be the very last. 
gu_conf_self_tstamp_on(); if (vm.count("log")) { set_logfile (log_); } else if (daemon_) /* if no log file given AND daemon operation requested - * log to syslog */ { gu_conf_self_tstamp_off(); set_syslog(); } gu_crc32c_configure(); } std::ostream& operator << (std::ostream& os, const Config& c) { os << "\n\tdaemon: " << c.daemon() << "\n\tname: " << c.name() << "\n\taddress: " << c.address() << "\n\tgroup: " << c.group() << "\n\tsst: " << c.sst() << "\n\tdonor: " << c.donor() << "\n\toptions: " << c.options() << "\n\tcfg: " << c.cfg() << "\n\tworkdir: " << c.workdir() << "\n\tlog: " << c.log(); return os; } } galera-4-26.4.25/garb/garb_gcs.hpp000644 000164 177776 00000001537 15107057155 017715 0ustar00jenkinsnogroup000000 000000 /* Copyright (C) 2011-2016 Codership Oy */ #ifndef _GARB_GCS_HPP_ #define _GARB_GCS_HPP_ #include #include namespace garb { class Gcs { public: Gcs (gu::Config& conf, const std::string& name, const std::string& address, const std::string& group); ~Gcs (); void recv (gcs_action& act); void request_state_transfer (const std::string& request, const std::string& donor); void join (const gu::GTID&, int code); void set_last_applied(const gu::GTID&); int proto_ver() const { return gcs_proto_ver(gcs_); } void close (); private: bool closed_; gcs_conn_t* gcs_; Gcs (const Gcs&); Gcs& operator= (const Gcs&); }; /* class Gcs */ } /* namespace garb */ #endif /* _GARB_GCS_HPP_ */ galera-4-26.4.25/GALERA_GIT_REVISION000644 000164 177776 00000000010 15107057156 017403 0ustar00jenkinsnogroup000000 000000 5d07ad0agalera-4-26.4.25/man/000755 000164 177776 00000000000 15107057160 015263 5ustar00jenkinsnogroup000000 000000 galera-4-26.4.25/man/garbd.8000644 000164 177776 00000004353 15107057155 016444 0ustar00jenkinsnogroup000000 000000 .TH GARBD "8" "December 2014" "garbd INFO: 2.8.r165" "System Administration Utilities" .SH NAME garbd \- arbitrator daemon for Galera cluster .SH SYNOPSIS .B garbd [\fI\,options\/\fR] [\fI\,group address\/\fR] .SH DESCRIPTION 
.B garbd joins Galera cluster as an additional node for the purpose of establishing quorum in case of network partitioning. It can do so by serving: .RS a) as an odd node to prevent split-brains; .RE .RS b) as a reference connection point outside a datacenter. .RE Arbitrator node must see all messages that the other nodes of the cluster see, however it does not process them any further and just discards them. As such it does not store any cluster state and can't be used to bootstrap the cluster, so it only can join existing cluster. .SH OPTIONS .SS "Configuration:" .TP \fB\-d\fR [ \fB\-\-daemon\fR ] Become daemon .TP \fB\-a\fR [ \fB\-\-address\fR ] arg Group address in Galera format .TP \fB\-g\fR [ \fB\-\-group\fR ] arg Group name .TP \fB\-\-sst\fR arg SST request string that contains SST request to trigger state snapshot dump (state backup) on one of the other nodes. For details refer to Galera documentation at https://www.galeracluster.com .TP \fB\-\-donor\fR arg SST donor name (for state dump) .TP \fB\-o\fR [ \fB\-\-options\fR ] arg GCS/GCOMM option list. It is likely to be the same as on other nodes of the cluster. .TP \fB\-l\fR [ \fB\-\-log\fR ] arg Path to log file .TP \fB\-c\fR [ \fB\-\-cfg\fR ] arg Path to configuration file. Configuration file contains garbd options in the form \fB